2024-12-12 05:38:43,820 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-12 05:38:43,836 main DEBUG Took 0.014021 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-12 05:38:43,837 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-12 05:38:43,837 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-12 05:38:43,838 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-12 05:38:43,840 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,847 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-12 05:38:43,858 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,861 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,862 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,862 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,863 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,864 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,865 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,866 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,866 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,867 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-12 05:38:43,867 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,868 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,868 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,869 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,869 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,870 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,870 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,870 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,871 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,872 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 05:38:43,872 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,873 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-12 05:38:43,875 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 05:38:43,877 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-12 05:38:43,880 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-12 05:38:43,881 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-12 05:38:43,882 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-12 05:38:43,882 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-12 05:38:43,889 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-12 05:38:43,892 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-12 05:38:43,893 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-12 05:38:43,894 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-12 05:38:43,894 main DEBUG createAppenders(={Console}) 2024-12-12 05:38:43,895 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-12 05:38:43,895 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-12 05:38:43,895 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-12 05:38:43,896 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-12 05:38:43,896 main DEBUG OutputStream closed 2024-12-12 05:38:43,896 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-12 05:38:43,896 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-12 05:38:43,896 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-12 05:38:43,955 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-12 05:38:43,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-12 05:38:43,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-12 05:38:43,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-12 05:38:43,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-12 05:38:43,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-12 05:38:43,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-12 05:38:43,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-12 05:38:43,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-12 05:38:43,961 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-12 05:38:43,961 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-12 05:38:43,961 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-12 05:38:43,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-12 05:38:43,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-12 05:38:43,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-12 05:38:43,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-12 05:38:43,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-12 05:38:43,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-12 05:38:43,965 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-12 05:38:43,965 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-12 05:38:43,965 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-12 05:38:43,966 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-12T05:38:44,142 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7 2024-12-12 05:38:44,145 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-12 05:38:44,145 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
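For reference, a log4j2.properties along the following lines would yield the logger set and layout shown in the configuration dump above. This is a minimal sketch inferred from the builder output, not the exact file loaded from the hbase-logging tests jar; in particular the real appender is the custom HBaseTestAppender plugin (target SYSTEM_ERR, maxSize 1G), for which a plain Console appender stands in here, and only a few of the configured loggers are reproduced.

    # sketch only; inferred from the PropertiesConfiguration dump above
    status = debug

    appender.console.type = Console
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    rootLogger = INFO,Console

    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false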
2024-12-12T05:38:44,152 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-12T05:38:44,169 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-12T05:38:44,171 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1, deleteOnExit=true 2024-12-12T05:38:44,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-12T05:38:44,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/test.cache.data in system properties and HBase conf 2024-12-12T05:38:44,173 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T05:38:44,173 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/hadoop.log.dir in system properties and HBase conf 2024-12-12T05:38:44,174 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T05:38:44,174 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T05:38:44,174 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-12T05:38:44,274 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-12T05:38:44,389 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-12T05:38:44,393 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T05:38:44,394 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T05:38:44,394 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T05:38:44,395 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T05:38:44,395 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T05:38:44,396 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T05:38:44,396 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T05:38:44,397 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T05:38:44,397 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T05:38:44,398 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/nfs.dump.dir in system properties and HBase conf 2024-12-12T05:38:44,398 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/java.io.tmpdir in system properties and HBase conf 2024-12-12T05:38:44,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T05:38:44,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T05:38:44,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T05:38:45,289 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-12T05:38:45,352 INFO [Time-limited test {}] log.Log(170): Logging initialized @2146ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-12T05:38:45,412 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:38:45,465 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T05:38:45,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T05:38:45,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T05:38:45,484 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T05:38:45,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:38:45,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/hadoop.log.dir/,AVAILABLE} 2024-12-12T05:38:45,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T05:38:45,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/java.io.tmpdir/jetty-localhost-41989-hadoop-hdfs-3_4_1-tests_jar-_-any-918342689297118437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T05:38:45,655 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:41989} 2024-12-12T05:38:45,656 INFO [Time-limited test {}] server.Server(415): Started @2450ms 2024-12-12T05:38:46,100 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T05:38:46,106 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T05:38:46,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T05:38:46,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T05:38:46,108 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T05:38:46,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/hadoop.log.dir/,AVAILABLE} 2024-12-12T05:38:46,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T05:38:46,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/java.io.tmpdir/jetty-localhost-36325-hadoop-hdfs-3_4_1-tests_jar-_-any-4599299468689443064/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T05:38:46,202 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:36325} 2024-12-12T05:38:46,202 INFO [Time-limited test {}] server.Server(415): Started @2996ms 2024-12-12T05:38:46,247 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T05:38:46,920 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/dfs/data/data2/current/BP-341374527-172.17.0.2-1733981924895/current, will proceed with Du for space computation calculation, 2024-12-12T05:38:46,920 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/dfs/data/data1/current/BP-341374527-172.17.0.2-1733981924895/current, will proceed with Du for space computation calculation, 2024-12-12T05:38:46,949 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-12T05:38:46,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7 2024-12-12T05:38:46,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44d730a456a5e15d with lease ID 0xbb9ef5283bd5d9a4: Processing first storage report for DS-3f192626-04b3-4437-bd13-4837f50c1799 from datanode DatanodeRegistration(127.0.0.1:43689, datanodeUuid=0dcc0832-ff2b-4f87-9ae9-86a479069cdd, infoPort=45833, infoSecurePort=0, ipcPort=36577, storageInfo=lv=-57;cid=testClusterID;nsid=176446867;c=1733981924895) 2024-12-12T05:38:46,993 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44d730a456a5e15d with lease ID 0xbb9ef5283bd5d9a4: from storage DS-3f192626-04b3-4437-bd13-4837f50c1799 node DatanodeRegistration(127.0.0.1:43689, datanodeUuid=0dcc0832-ff2b-4f87-9ae9-86a479069cdd, infoPort=45833, infoSecurePort=0, ipcPort=36577, storageInfo=lv=-57;cid=testClusterID;nsid=176446867;c=1733981924895), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-12T05:38:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44d730a456a5e15d with lease ID 0xbb9ef5283bd5d9a4: Processing first storage report for DS-8960aa11-f5b4-47cd-bc75-a92d68c9f3d9 from datanode DatanodeRegistration(127.0.0.1:43689, datanodeUuid=0dcc0832-ff2b-4f87-9ae9-86a479069cdd, infoPort=45833, infoSecurePort=0, ipcPort=36577, storageInfo=lv=-57;cid=testClusterID;nsid=176446867;c=1733981924895) 2024-12-12T05:38:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44d730a456a5e15d with lease ID 0xbb9ef5283bd5d9a4: from storage DS-8960aa11-f5b4-47cd-bc75-a92d68c9f3d9 node DatanodeRegistration(127.0.0.1:43689, datanodeUuid=0dcc0832-ff2b-4f87-9ae9-86a479069cdd, infoPort=45833, infoSecurePort=0, ipcPort=36577, storageInfo=lv=-57;cid=testClusterID;nsid=176446867;c=1733981924895), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 
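The startup above is the standard HBase mini-cluster harness at work. A skeletal JUnit class of the kind that produces it is sketched below; the class name and cluster topology are taken from the log (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1}), but this is an illustrative skeleton, not the actual source of TestAcidGuaranteesWithAdaptivePolicy, which builds on a shared ACID-guarantees base class and category annotations omitted here.

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    public class TestAcidGuaranteesWithAdaptivePolicy {

      // Enforces the per-class timeout reported above ("timeout: 13 mins").
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(TestAcidGuaranteesWithAdaptivePolicy.class);

      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors the StartMiniClusterOption logged above: 1 master, 1 region server, 1 datanode, 1 ZK server.
        UTIL.startMiniCluster(StartMiniClusterOption.builder()
            .numMasters(1).numRegionServers(1).numDataNodes(1).numZkServers(1).build());
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();
      }

      // ... test methods exercising the ACID guarantees go here ...
    }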
2024-12-12T05:38:47,064 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/zookeeper_0, clientPort=60303, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-12T05:38:47,075 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60303 2024-12-12T05:38:47,089 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:47,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:47,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741825_1001 (size=7) 2024-12-12T05:38:47,722 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d with version=8 2024-12-12T05:38:47,723 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/hbase-staging 2024-12-12T05:38:47,827 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-12T05:38:48,054 INFO [Time-limited test {}] client.ConnectionUtils(129): master/83e80bf221ca:0 server-side Connection retries=45 2024-12-12T05:38:48,069 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:38:48,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T05:38:48,070 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T05:38:48,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:38:48,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T05:38:48,175 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T05:38:48,222 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-12T05:38:48,229 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-12T05:38:48,233 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T05:38:48,254 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 43202 (auto-detected) 2024-12-12T05:38:48,254 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-12T05:38:48,272 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34751 2024-12-12T05:38:48,279 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:48,281 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:48,293 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34751 connecting to ZooKeeper ensemble=127.0.0.1:60303 2024-12-12T05:38:48,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347510x0, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T05:38:48,391 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34751-0x10018bf93040000 connected 2024-12-12T05:38:48,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T05:38:48,496 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:38:48,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T05:38:48,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34751 2024-12-12T05:38:48,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34751 2024-12-12T05:38:48,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34751 2024-12-12T05:38:48,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34751 2024-12-12T05:38:48,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34751 
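With the master RPC endpoint bound (NettyRpcServer on 172.17.0.2:34751) and registered in the mini ZooKeeper ensemble at 127.0.0.1:60303, test code normally reaches the cluster through the utility's Configuration rather than hard-coding those addresses. A small sketch follows; the wrapper class and method names are made up, while the HBase calls themselves are standard client API.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      // Reaches the cluster started above; util.getConfiguration() already carries the
      // mini ZooKeeper quorum and client port (60303 in this run).
      static void pingCluster(HBaseTestingUtility util) throws Exception {
        Configuration conf = util.getConfiguration();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.listTableNames(); // any client/admin call now goes through the RPC server bound above
        }
      }
    }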
2024-12-12T05:38:48,515 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d, hbase.cluster.distributed=false 2024-12-12T05:38:48,567 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/83e80bf221ca:0 server-side Connection retries=45 2024-12-12T05:38:48,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:38:48,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T05:38:48,567 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T05:38:48,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T05:38:48,568 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T05:38:48,569 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T05:38:48,571 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T05:38:48,572 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46457 2024-12-12T05:38:48,573 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T05:38:48,577 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T05:38:48,578 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:48,580 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:48,583 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46457 connecting to ZooKeeper ensemble=127.0.0.1:60303 2024-12-12T05:38:48,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464570x0, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T05:38:48,594 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:464570x0, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T05:38:48,594 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46457-0x10018bf93040001 connected 2024-12-12T05:38:48,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:38:48,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T05:38:48,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46457 2024-12-12T05:38:48,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46457 2024-12-12T05:38:48,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46457 2024-12-12T05:38:48,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46457 2024-12-12T05:38:48,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46457 2024-12-12T05:38:48,600 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/83e80bf221ca,34751,1733981927819 2024-12-12T05:38:48,611 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83e80bf221ca:34751 2024-12-12T05:38:48,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:38:48,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:38:48,616 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83e80bf221ca,34751,1733981927819 2024-12-12T05:38:48,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:38:48,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T05:38:48,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:48,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:48,643 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T05:38:48,644 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83e80bf221ca,34751,1733981927819 from backup master directory 2024-12-12T05:38:48,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T05:38:48,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:38:48,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83e80bf221ca,34751,1733981927819 2024-12-12T05:38:48,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T05:38:48,656 WARN [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:38:48,657 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83e80bf221ca,34751,1733981927819 2024-12-12T05:38:48,659 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-12T05:38:48,660 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-12T05:38:48,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741826_1002 (size=42) 2024-12-12T05:38:49,131 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/hbase.id with ID: f645fc1c-6417-4a2d-ac1d-5129c0aa2a57 2024-12-12T05:38:49,177 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T05:38:49,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:49,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:49,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741827_1003 (size=196) 2024-12-12T05:38:49,683 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:38:49,685 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T05:38:49,698 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:49,701 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:38:49,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741828_1004 (size=1189) 2024-12-12T05:38:50,149 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store 2024-12-12T05:38:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741829_1005 (size=34) 2024-12-12T05:38:50,572 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
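The 'master:store' descriptor printed above maps directly onto the public descriptor builders. Roughly, the 'info' family as logged (VERSIONS=3, ROW_INDEX_V1 data-block encoding, ROWCOL bloom filter, IN_MEMORY=true, 8 KB blocks) could be declared as in the sketch below; this illustrates the equivalent API, not the internal code HBase uses to build its local master store, and the wrapper class/method names are made up.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // 'info' family as printed in the log; the proc/rs/state families would follow the
      // same pattern with their own attributes (VERSIONS=1, NONE encoding, ROW bloom, 64 KB blocks).
      static TableDescriptor masterStoreLike() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
      }
    }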
2024-12-12T05:38:50,572 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:50,574 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T05:38:50,574 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:38:50,574 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:38:50,574 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T05:38:50,575 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:38:50,575 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T05:38:50,575 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T05:38:50,577 WARN [master/83e80bf221ca:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/.initializing 2024-12-12T05:38:50,577 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/WALs/83e80bf221ca,34751,1733981927819 2024-12-12T05:38:50,583 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T05:38:50,592 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83e80bf221ca%2C34751%2C1733981927819, suffix=, logDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/WALs/83e80bf221ca,34751,1733981927819, archiveDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/oldWALs, maxLogs=10 2024-12-12T05:38:50,610 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/WALs/83e80bf221ca,34751,1733981927819/83e80bf221ca%2C34751%2C1733981927819.1733981930596, exclude list is [], retry=0 2024-12-12T05:38:50,625 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43689,DS-3f192626-04b3-4437-bd13-4837f50c1799,DISK] 2024-12-12T05:38:50,628 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
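The WAL lines above (AsyncFSWALProvider selected, blocksize=256 MB, rollsize=128 MB, maxLogs=10) are governed by a handful of configuration keys. The sketch below restates the logged values through those keys (they also happen to match the defaults for a 128 MB HDFS block size); the master's local MasterData WAL may apply its own overrides on top of these region-server settings, and the wrapper class/method names are made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalTuningSketch {
      static Configuration walSettingsAsLogged() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs"); // AsyncFSWALProvider, as instantiated above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // 256 MB * 0.5 => rollsize=128 MB
        conf.setInt("hbase.regionserver.maxlogs", 10);                         // maxLogs=10
        return conf;
      }
    }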
2024-12-12T05:38:50,658 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/WALs/83e80bf221ca,34751,1733981927819/83e80bf221ca%2C34751%2C1733981927819.1733981930596 2024-12-12T05:38:50,659 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45833:45833)] 2024-12-12T05:38:50,660 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:38:50,660 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:50,663 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,664 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T05:38:50,721 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:50,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:50,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T05:38:50,728 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:50,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:50,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T05:38:50,731 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:50,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:50,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T05:38:50,735 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:50,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:50,739 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,740 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,748 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T05:38:50,752 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T05:38:50,757 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:38:50,758 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75070042, jitterRate=0.1186307966709137}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T05:38:50,762 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T05:38:50,763 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T05:38:50,787 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@442a4c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:50,813 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
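Two of the numbers reported when the master:store region opens can be derived from the values printed alongside them. The split policy's desiredMaxFileSize=75070042 is the configured maximum file size adjusted by the printed jitter, roughly 67,108,864 x (1 + 0.1186307966709137), which implies a 64 MB base max file size in this test configuration. The flush policy's flushSizeLowerBound=33554432 is the flushSize=134217728 reported by MasterRegionFlusherAndCompactor divided by the region's four column families (info, proc, rs, state): 134,217,728 / 4 = 33,554,432 bytes, i.e. the 32.0 M figure quoted by FlushLargeStoresPolicy.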
2024-12-12T05:38:50,822 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T05:38:50,822 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T05:38:50,823 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T05:38:50,825 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-12T05:38:50,829 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec 2024-12-12T05:38:50,829 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T05:38:50,849 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-12T05:38:50,859 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T05:38:50,867 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-12T05:38:50,869 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T05:38:50,870 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T05:38:50,904 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-12T05:38:50,907 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T05:38:50,912 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T05:38:50,930 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-12T05:38:50,931 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T05:38:50,938 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T05:38:50,950 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T05:38:50,959 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T05:38:50,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:38:50,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T05:38:50,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:50,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:50,974 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=83e80bf221ca,34751,1733981927819, sessionid=0x10018bf93040000, setting cluster-up flag (Was=false) 2024-12-12T05:38:51,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:51,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:51,030 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T05:38:51,032 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83e80bf221ca,34751,1733981927819 2024-12-12T05:38:51,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:51,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:51,076 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T05:38:51,077 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83e80bf221ca,34751,1733981927819 2024-12-12T05:38:51,115 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83e80bf221ca:46457 2024-12-12T05:38:51,117 INFO 
[RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1008): ClusterId : f645fc1c-6417-4a2d-ac1d-5129c0aa2a57 2024-12-12T05:38:51,120 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T05:38:51,132 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T05:38:51,133 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T05:38:51,144 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T05:38:51,144 DEBUG [RS:0;83e80bf221ca:46457 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5395c27c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:51,147 DEBUG [RS:0;83e80bf221ca:46457 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@135e2c72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83e80bf221ca/172.17.0.2:0 2024-12-12T05:38:51,148 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-12T05:38:51,151 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T05:38:51,151 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T05:38:51,151 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-12T05:38:51,153 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(3073): reportForDuty to master=83e80bf221ca,34751,1733981927819 with isa=83e80bf221ca/172.17.0.2:46457, startcode=1733981928566 2024-12-12T05:38:51,155 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-12T05:38:51,158 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
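The balancer configuration dump above comes from StochasticLoadBalancer as it is instantiated on the active master. Below is a minimal sketch of the configuration that selects and tunes it; the key names are the standard balancer properties and should be treated as assumptions, since the log prints only the resolved values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed standard key for choosing the balancer implementation named in the log.
            conf.set("hbase.master.loadbalancer.class",
                "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer");
            // Knobs echoed in the "Loaded config" line: maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000.
            conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1000000L);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
            System.out.println(conf.get("hbase.master.loadbalancer.class"));
        }
    }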
2024-12-12T05:38:51,164 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83e80bf221ca,34751,1733981927819 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T05:38:51,167 DEBUG [RS:0;83e80bf221ca:46457 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T05:38:51,168 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83e80bf221ca:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:38:51,168 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83e80bf221ca:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:38:51,169 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83e80bf221ca:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:38:51,169 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83e80bf221ca:0, corePoolSize=5, maxPoolSize=5 2024-12-12T05:38:51,169 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83e80bf221ca:0, corePoolSize=10, maxPoolSize=10 2024-12-12T05:38:51,169 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,169 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83e80bf221ca:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:38:51,169 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,172 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733981961171 2024-12-12T05:38:51,174 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T05:38:51,175 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:38:51,175 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T05:38:51,176 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-12T05:38:51,179 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T05:38:51,179 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T05:38:51,179 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:51,180 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T05:38:51,180 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T05:38:51,180 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T05:38:51,181 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
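The FSTableDescriptors line above records the hbase:meta descriptor being written with three column families (info, rep_barrier, table) and the MultiRowMutationEndpoint coprocessor. Below is a minimal sketch of reading that descriptor back through the client API against a running cluster, using the standard Admin interface:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaDescriptorSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
                // Expect the same families the master just wrote: info, rep_barrier, table.
                for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
                    System.out.println(cf.getNameAsString() + " blocksize=" + cf.getBlocksize());
                }
            }
        }
    }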
2024-12-12T05:38:51,184 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T05:38:51,185 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T05:38:51,185 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T05:38:51,188 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T05:38:51,188 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T05:38:51,189 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83e80bf221ca:0:becomeActiveMaster-HFileCleaner.large.0-1733981931189,5,FailOnTimeoutGroup] 2024-12-12T05:38:51,190 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83e80bf221ca:0:becomeActiveMaster-HFileCleaner.small.0-1733981931190,5,FailOnTimeoutGroup] 2024-12-12T05:38:51,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741831_1007 (size=1039) 2024-12-12T05:38:51,190 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,191 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T05:38:51,193 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,193 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
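The HMaster line above notes that reopening regions with a very high store file reference count stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0 (the key name is printed verbatim in the message). A minimal sketch of turning the feature on; the threshold of 256 is only an illustrative value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Key taken from the HMaster message above; any value > 0 enables the recovery behaviour.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 256); // illustrative threshold
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }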
2024-12-12T05:38:51,200 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54355, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:38:51,205 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34751 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:51,207 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34751 {}] master.ServerManager(486): Registering regionserver=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:51,219 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:38:51,219 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45813 2024-12-12T05:38:51,219 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T05:38:51,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T05:38:51,231 DEBUG [RS:0;83e80bf221ca:46457 {}] zookeeper.ZKUtil(111): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83e80bf221ca,46457,1733981928566 2024-12-12T05:38:51,231 WARN [RS:0;83e80bf221ca:46457 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T05:38:51,231 INFO [RS:0;83e80bf221ca:46457 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:38:51,231 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566 2024-12-12T05:38:51,233 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83e80bf221ca,46457,1733981928566] 2024-12-12T05:38:51,245 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T05:38:51,255 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T05:38:51,265 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T05:38:51,267 INFO [RS:0;83e80bf221ca:46457 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T05:38:51,268 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
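The MemStoreFlusher line above shows the relationship between the two global memstore bounds: the low-water mark is 95% of the global limit, 880 MB x 0.95 = 836 MB. Both values are normally derived from the heap size via the global memstore fraction. A minimal sketch follows, assuming the standard global-memstore keys (the log prints only the resolved sizes):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed standard keys: fraction of heap for all memstores, and the lower-limit fraction of that.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

            long globalLimitBytes = 880L * 1024 * 1024; // resolved value reported by MemStoreFlusher
            float lower = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Reproduces the 836 M low-water mark: 880 MB * 0.95 = 836 MB.
            System.out.println("lowMarkMB=" + (long) (globalLimitBytes * lower / (1024 * 1024)));
        }
    }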
2024-12-12T05:38:51,268 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T05:38:51,274 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,274 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,274 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,274 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,274 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,274 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83e80bf221ca:0, corePoolSize=2, maxPoolSize=2 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83e80bf221ca:0, corePoolSize=1, maxPoolSize=1 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83e80bf221ca:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:38:51,275 DEBUG [RS:0;83e80bf221ca:46457 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0, corePoolSize=3, maxPoolSize=3 2024-12-12T05:38:51,276 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,276 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,276 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-12T05:38:51,276 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,277 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,46457,1733981928566-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:38:51,293 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T05:38:51,294 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,46457,1733981928566-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:51,310 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.Replication(204): 83e80bf221ca,46457,1733981928566 started 2024-12-12T05:38:51,310 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1767): Serving as 83e80bf221ca,46457,1733981928566, RpcServer on 83e80bf221ca/172.17.0.2:46457, sessionid=0x10018bf93040001 2024-12-12T05:38:51,311 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T05:38:51,311 DEBUG [RS:0;83e80bf221ca:46457 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:51,311 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83e80bf221ca,46457,1733981928566' 2024-12-12T05:38:51,311 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T05:38:51,312 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T05:38:51,313 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T05:38:51,313 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T05:38:51,313 DEBUG [RS:0;83e80bf221ca:46457 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:51,313 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83e80bf221ca,46457,1733981928566' 2024-12-12T05:38:51,313 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T05:38:51,314 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T05:38:51,314 DEBUG [RS:0;83e80bf221ca:46457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T05:38:51,314 INFO [RS:0;83e80bf221ca:46457 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T05:38:51,315 INFO [RS:0;83e80bf221ca:46457 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
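Both quota managers report quota support disabled at the end of the region server startup sequence above. A minimal sketch of the configuration switch that enables it, assuming the standard hbase.quota.enabled key (not printed in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed standard key; quotas are off by default, which matches the log above.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }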
2024-12-12T05:38:51,425 INFO [RS:0;83e80bf221ca:46457 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T05:38:51,428 INFO [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83e80bf221ca%2C46457%2C1733981928566, suffix=, logDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566, archiveDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/oldWALs, maxLogs=32 2024-12-12T05:38:51,442 DEBUG [RS:0;83e80bf221ca:46457 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566/83e80bf221ca%2C46457%2C1733981928566.1733981931430, exclude list is [], retry=0 2024-12-12T05:38:51,446 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43689,DS-3f192626-04b3-4437-bd13-4837f50c1799,DISK] 2024-12-12T05:38:51,450 INFO [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566/83e80bf221ca%2C46457%2C1733981928566.1733981931430 2024-12-12T05:38:51,451 DEBUG [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45833:45833)] 2024-12-12T05:38:51,595 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-12T05:38:51,596 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:38:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741833_1009 (size=32) 2024-12-12T05:38:52,015 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:52,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T05:38:52,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T05:38:52,024 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:52,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T05:38:52,029 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T05:38:52,029 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:52,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T05:38:52,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T05:38:52,034 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:52,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740 2024-12-12T05:38:52,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740 2024-12-12T05:38:52,040 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
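The FlushLargeStoresPolicy message above falls back to the memstore flush size divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the table descriptor (the key name is printed verbatim). A minimal sketch of setting that bound explicitly on a table descriptor; the table name and the 16 MB value are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
        public static void main(String[] args) {
            // Key taken from the FlushLargeStoresPolicy message above; value is an illustrative 16 MB.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }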
2024-12-12T05:38:52,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T05:38:52,046 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:38:52,047 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71761486, jitterRate=0.06932947039604187}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:38:52,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T05:38:52,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T05:38:52,049 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T05:38:52,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T05:38:52,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T05:38:52,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T05:38:52,050 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T05:38:52,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T05:38:52,053 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T05:38:52,053 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-12T05:38:52,057 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T05:38:52,063 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T05:38:52,065 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T05:38:52,218 DEBUG [83e80bf221ca:34751 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-12T05:38:52,230 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:52,237 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83e80bf221ca,46457,1733981928566, state=OPENING 2024-12-12T05:38:52,280 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T05:38:52,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:52,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:52,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:38:52,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:38:52,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:38:52,468 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:52,470 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T05:38:52,473 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T05:38:52,484 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-12T05:38:52,484 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T05:38:52,485 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T05:38:52,489 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83e80bf221ca%2C46457%2C1733981928566.meta, suffix=.meta, logDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566, archiveDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/oldWALs, maxLogs=32 2024-12-12T05:38:52,502 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566/83e80bf221ca%2C46457%2C1733981928566.meta.1733981932490.meta, exclude list is [], retry=0 2024-12-12T05:38:52,506 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43689,DS-3f192626-04b3-4437-bd13-4837f50c1799,DISK] 2024-12-12T05:38:52,508 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/WALs/83e80bf221ca,46457,1733981928566/83e80bf221ca%2C46457%2C1733981928566.meta.1733981932490.meta 2024-12-12T05:38:52,508 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:45833:45833)] 2024-12-12T05:38:52,509 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:38:52,510 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T05:38:52,554 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T05:38:52,558 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-12T05:38:52,563 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T05:38:52,563 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:52,563 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-12T05:38:52,563 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-12T05:38:52,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T05:38:52,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T05:38:52,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:52,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T05:38:52,571 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T05:38:52,571 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:52,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T05:38:52,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T05:38:52,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T05:38:52,576 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740 2024-12-12T05:38:52,579 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740 2024-12-12T05:38:52,581 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:38:52,584 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T05:38:52,586 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68522451, jitterRate=0.021064087748527527}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:38:52,588 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T05:38:52,595 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733981932464 2024-12-12T05:38:52,608 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:52,610 DEBUG [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T05:38:52,610 INFO [RS_OPEN_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-12T05:38:52,610 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83e80bf221ca,46457,1733981928566, state=OPEN 2024-12-12T05:38:52,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:38:52,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T05:38:52,643 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:38:52,643 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T05:38:52,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T05:38:52,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=83e80bf221ca,46457,1733981928566 in 350 msec 2024-12-12T05:38:52,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T05:38:52,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 593 msec 2024-12-12T05:38:52,662 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5510 sec 2024-12-12T05:38:52,662 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733981932662, completionTime=-1 2024-12-12T05:38:52,662 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-12T05:38:52,662 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-12T05:38:52,696 DEBUG [hconnection-0x3e8ab696-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:52,699 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:52,708 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-12T05:38:52,708 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733981992708 2024-12-12T05:38:52,708 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733982052708 2024-12-12T05:38:52,708 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 46 msec 2024-12-12T05:38:52,744 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,34751,1733981927819-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:52,744 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,34751,1733981927819-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:52,744 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,34751,1733981927819-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:52,745 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83e80bf221ca:34751, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:52,746 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T05:38:52,751 DEBUG [master/83e80bf221ca:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-12T05:38:52,754 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
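The master is bootstrapping the hbase:namespace table here, and a little further down it runs CreateNamespaceProcedure for the built-in 'default' and 'hbase' namespaces (pid=7 and pid=8). User namespaces are created the same way through the client Admin API; a minimal sketch assuming a standard 2.x client on the classpath (the namespace name 'acid_test' is made up for illustration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceExample {
      public static void main(String[] args) throws Exception {
        // Connects using whatever hbase-site.xml is on the classpath; the quorum
        // seen in this log (127.0.0.1:60303) is just what the mini cluster picked.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Client-side equivalent of what CreateNamespaceProcedure does internally.
          admin.createNamespace(NamespaceDescriptor.create("acid_test").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }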
2024-12-12T05:38:52,755 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T05:38:52,761 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-12T05:38:52,763 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:38:52,764 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:52,766 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:38:52,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741835_1011 (size=358) 2024-12-12T05:38:53,189 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b675848e5b5abf83ab0aa0c34e08f9b3, NAME => 'hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:38:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741836_1012 (size=42) 2024-12-12T05:38:53,601 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:53,601 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing b675848e5b5abf83ab0aa0c34e08f9b3, disabling compactions & flushes 2024-12-12T05:38:53,602 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:38:53,602 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:38:53,602 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 
after waiting 0 ms 2024-12-12T05:38:53,602 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:38:53,602 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:38:53,602 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for b675848e5b5abf83ab0aa0c34e08f9b3: 2024-12-12T05:38:53,607 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:38:53,617 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733981933609"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733981933609"}]},"ts":"1733981933609"} 2024-12-12T05:38:53,637 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:38:53,639 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:38:53,642 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981933639"}]},"ts":"1733981933639"} 2024-12-12T05:38:53,647 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-12T05:38:53,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=b675848e5b5abf83ab0aa0c34e08f9b3, ASSIGN}] 2024-12-12T05:38:53,710 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=b675848e5b5abf83ab0aa0c34e08f9b3, ASSIGN 2024-12-12T05:38:53,711 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=b675848e5b5abf83ab0aa0c34e08f9b3, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:38:53,863 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=b675848e5b5abf83ab0aa0c34e08f9b3, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:53,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure b675848e5b5abf83ab0aa0c34e08f9b3, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:38:54,029 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:54,039 INFO [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:38:54,039 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => b675848e5b5abf83ab0aa0c34e08f9b3, NAME => 'hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:38:54,040 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,040 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:54,040 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,040 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,043 INFO [StoreOpener-b675848e5b5abf83ab0aa0c34e08f9b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,046 INFO [StoreOpener-b675848e5b5abf83ab0aa0c34e08f9b3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b675848e5b5abf83ab0aa0c34e08f9b3 columnFamilyName info 2024-12-12T05:38:54,046 DEBUG [StoreOpener-b675848e5b5abf83ab0aa0c34e08f9b3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:54,047 INFO [StoreOpener-b675848e5b5abf83ab0aa0c34e08f9b3-1 {}] regionserver.HStore(327): Store=b675848e5b5abf83ab0aa0c34e08f9b3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:54,049 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,050 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,056 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:38:54,061 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:38:54,062 INFO [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened b675848e5b5abf83ab0aa0c34e08f9b3; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69132259, jitterRate=0.03015093505382538}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T05:38:54,063 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for b675848e5b5abf83ab0aa0c34e08f9b3: 2024-12-12T05:38:54,065 INFO [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3., pid=6, masterSystemTime=1733981934029 2024-12-12T05:38:54,068 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:38:54,069 INFO [RS_OPEN_PRIORITY_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 
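Each assignment step above is persisted as a Put against the region's row in hbase:meta (the RegionStateStore and MetaTableAccessor entries, with 'info:regioninfo' and 'info:state' qualifiers). Those catalog rows can be read back from any client when checking what state the master has recorded; a rough sketch using standard 2.x client classes, purely for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result row : scanner) {
              // One row per region, e.g. hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3.
              System.out.println(Bytes.toStringBinary(row.getRow()));
            }
          }
        }
      }
    }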
2024-12-12T05:38:54,069 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=b675848e5b5abf83ab0aa0c34e08f9b3, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:54,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T05:38:54,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure b675848e5b5abf83ab0aa0c34e08f9b3, server=83e80bf221ca,46457,1733981928566 in 200 msec 2024-12-12T05:38:54,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T05:38:54,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=b675848e5b5abf83ab0aa0c34e08f9b3, ASSIGN in 370 msec 2024-12-12T05:38:54,082 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:38:54,083 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981934082"}]},"ts":"1733981934082"} 2024-12-12T05:38:54,086 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-12T05:38:54,159 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:38:54,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.4030 sec 2024-12-12T05:38:54,166 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-12T05:38:54,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-12T05:38:54,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:54,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:38:54,206 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-12T05:38:54,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T05:38:54,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 34 msec 2024-12-12T05:38:54,252 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-12T05:38:54,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T05:38:54,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 32 msec 2024-12-12T05:38:54,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-12T05:38:54,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-12T05:38:54,331 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.674sec 2024-12-12T05:38:54,334 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T05:38:54,336 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T05:38:54,338 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T05:38:54,339 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T05:38:54,339 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T05:38:54,340 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,34751,1733981927819-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T05:38:54,341 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,34751,1733981927819-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T05:38:54,348 DEBUG [master/83e80bf221ca:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-12T05:38:54,348 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T05:38:54,349 INFO [master/83e80bf221ca:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83e80bf221ca,34751,1733981927819-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
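At this point the master reports initialization complete and its periodic chores are running. A client can confirm the same picture (one active master, one region server) with Admin.getClusterMetrics(); this is only an illustrative check, not something the test itself does in this log:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // In this run the active master is 83e80bf221ca,34751,1733981927819 and the
          // single region server is 83e80bf221ca,46457,1733981928566.
          System.out.println("active master: " + metrics.getMasterName());
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("region server: " + rs);
          }
        }
      }
    }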
2024-12-12T05:38:54,426 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-12-12T05:38:54,427 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-12T05:38:54,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:54,444 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T05:38:54,444 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T05:38:54,453 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:54,460 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:54,467 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=83e80bf221ca,34751,1733981927819 2024-12-12T05:38:54,478 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=134, ProcessCount=11, AvailableMemoryMB=14045 2024-12-12T05:38:54,488 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:38:54,490 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:38:54,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
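The TableDescriptorChecker warning fires because the effective memstore flush size (table descriptor MEMSTORE_FLUSHSIZE or hbase.hregion.memstore.flush.size) works out to 131072 bytes (128 KB), far below the 128 MB default, presumably so memstores flush constantly during the ACID test. One way a test can arrange this is via the configuration before the mini cluster starts; a sketch using HBaseTestingUtility (assumes the hbase-server test artifact on the classpath; variable names are illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Connection;

    public class MiniClusterExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // 128 KB instead of the 128 MB default; this is what triggers the
        // MEMSTORE_FLUSHSIZE warning above.
        util.getConfiguration().setInt("hbase.hregion.memstore.flush.size", 128 * 1024);
        util.startMiniCluster(1); // one master plus one region server
        try (Connection conn = util.getConnection()) {
          System.out.println("mini cluster up, master: "
              + util.getHBaseCluster().getMaster().getServerName());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }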
2024-12-12T05:38:54,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:38:54,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T05:38:54,520 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:38:54,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-12T05:38:54,521 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:54,523 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:38:54,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T05:38:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741837_1013 (size=963) 2024-12-12T05:38:54,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T05:38:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T05:38:54,945 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:38:54,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741838_1014 (size=53) 2024-12-12T05:38:55,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T05:38:55,355 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:55,356 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 61279763b720b7a9988338e6150d61c7, disabling compactions & flushes 2024-12-12T05:38:55,356 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:55,356 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:55,356 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. after waiting 0 ms 2024-12-12T05:38:55,356 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:55,356 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
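The descriptor being created above (three column families A, B and C with VERSIONS => '1', 64 KB blocks, and the table-level attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE') corresponds roughly to the following 2.x Admin API calls. This is a sketch of an equivalent request, not the test's own code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // table-level attribute selecting the adaptive in-memory compaction policy
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)       // VERSIONS => '1'
                    .setBlocksize(64 * 1024) // BLOCKSIZE => '65536'
                    .build());
          }
          admin.createTable(builder.build());
        }
      }
    }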
2024-12-12T05:38:55,356 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:55,358 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:38:55,358 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733981935358"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733981935358"}]},"ts":"1733981935358"} 2024-12-12T05:38:55,361 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:38:55,362 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:38:55,362 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981935362"}]},"ts":"1733981935362"} 2024-12-12T05:38:55,365 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T05:38:55,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, ASSIGN}] 2024-12-12T05:38:55,418 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, ASSIGN 2024-12-12T05:38:55,422 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:38:55,572 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=61279763b720b7a9988338e6150d61c7, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:55,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:38:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T05:38:55,729 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:55,741 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:38:55,742 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:38:55,742 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,742 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:38:55,742 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,742 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,745 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,748 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:38:55,749 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61279763b720b7a9988338e6150d61c7 columnFamilyName A 2024-12-12T05:38:55,749 DEBUG [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:55,750 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.HStore(327): Store=61279763b720b7a9988338e6150d61c7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:55,751 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,753 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:38:55,753 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61279763b720b7a9988338e6150d61c7 columnFamilyName B 2024-12-12T05:38:55,754 DEBUG [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:55,754 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.HStore(327): Store=61279763b720b7a9988338e6150d61c7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:55,755 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,757 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:38:55,758 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61279763b720b7a9988338e6150d61c7 columnFamilyName C 2024-12-12T05:38:55,758 DEBUG [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:38:55,759 INFO [StoreOpener-61279763b720b7a9988338e6150d61c7-1 {}] regionserver.HStore(327): Store=61279763b720b7a9988338e6150d61c7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:38:55,759 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:55,761 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,761 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,764 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:38:55,766 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:55,770 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:38:55,771 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 61279763b720b7a9988338e6150d61c7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60635865, jitterRate=-0.09645520150661469}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:38:55,772 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:55,773 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., pid=11, masterSystemTime=1733981935729 2024-12-12T05:38:55,776 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:55,776 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
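The stores above come up as CompactingMemStore with compactor=ADAPTIVE because of that table attribute; the same policy can also be requested per column family through ColumnFamilyDescriptorBuilder. A minimal sketch (the family name 'A' is reused from this table purely for illustration):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionExample {
      public static void main(String[] args) {
        // Per-family alternative to the table-level
        // 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute.
        ColumnFamilyDescriptor cf =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        System.out.println(cf);
      }
    }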
2024-12-12T05:38:55,777 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=61279763b720b7a9988338e6150d61c7, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:55,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-12T05:38:55,784 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 in 204 msec 2024-12-12T05:38:55,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-12T05:38:55,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, ASSIGN in 369 msec 2024-12-12T05:38:55,787 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:38:55,787 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981935787"}]},"ts":"1733981935787"} 2024-12-12T05:38:55,789 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T05:38:55,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:38:55,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2870 sec 2024-12-12T05:38:56,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T05:38:56,650 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-12T05:38:56,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-12-12T05:38:56,699 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,704 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,709 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,713 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:38:56,716 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47162, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:38:56,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-12-12T05:38:56,735 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,736 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-12-12T05:38:56,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,749 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-12-12T05:38:56,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,762 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-12-12T05:38:56,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-12-12T05:38:56,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,788 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-12-12T05:38:56,802 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,804 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-12-12T05:38:56,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,816 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-12-12T05:38:56,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,829 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-12-12T05:38:56,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:38:56,847 DEBUG [hconnection-0x169ff269-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,848 DEBUG [hconnection-0x293c9f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,849 DEBUG [hconnection-0x7c3cfe41-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,849 DEBUG [hconnection-0x34907d80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,849 DEBUG [hconnection-0x131c45f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,850 DEBUG [hconnection-0x32d9f7e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,850 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,851 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,852 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46350, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,852 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,853 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:38:56,856 DEBUG [hconnection-0x11fa2cd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,856 DEBUG [hconnection-0x47be79fd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,857 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,859 DEBUG [hconnection-0x522bb58c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:38:56,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-12T05:38:56,862 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,863 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:38:56,865 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T05:38:56,867 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:38:56,867 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:38:56,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:38:56,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:56,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:38:56,944 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:38:56,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:56,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:38:56,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:56,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:38:56,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T05:38:57,029 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T05:38:57,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:57,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
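The entries above trace a master-coordinated flush: the master stores FlushTableProcedure pid=12 for TestAcidGuarantees, creates the FlushRegionProcedure child pid=13, and dispatches it to region server 83e80bf221ca,46457; the server declines with "NOT flushing ... as already flushing" because MemStoreFlusher.0 is already flushing the region's three column families (A, B, C), so the callable fails with the "Unable to complete flush" IOException and the master re-dispatches pid=13, which is why that pid reappears several times in the entries that follow. Purely as an illustrative sketch (not part of this test run), a client-side request for the same kind of table flush through the public Admin API could look roughly like the code below; the quorum setting is a placeholder assumption, since the test uses an in-process mini-cluster at 127.0.0.1:60303 instead.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder quorum address; an assumption for this sketch only.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush all regions of the table. In the log above the
            // equivalent request appears as "Client=... flush TestAcidGuarantees",
            // followed by FlushTableProcedure pid=12 and its FlushRegionProcedure child.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
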
2024-12-12T05:38:57,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981997014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981997018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981997032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981997055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981997056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/7fd8efc5717744a89a110adc0a100cb1 is 50, key is test_row_0/A:col10/1733981936930/Put/seqid=0 2024-12-12T05:38:57,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741839_1015 (size=12001) 2024-12-12T05:38:57,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/7fd8efc5717744a89a110adc0a100cb1 2024-12-12T05:38:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T05:38:57,201 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T05:38:57,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:57,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/34300d3d452445469390ea34cbfb4d6a is 50, key is test_row_0/B:col10/1733981936930/Put/seqid=0 2024-12-12T05:38:57,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981997211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981997213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981997213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981997213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981997213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741840_1016 (size=12001) 2024-12-12T05:38:57,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/34300d3d452445469390ea34cbfb4d6a 2024-12-12T05:38:57,250 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T05:38:57,251 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T05:38:57,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-12T05:38:57,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/eeb6670c0c1848a9a69bbcc516b09e61 is 50, key is test_row_0/C:col10/1733981936930/Put/seqid=0 2024-12-12T05:38:57,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741841_1017 (size=12001) 2024-12-12T05:38:57,370 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T05:38:57,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
as already flushing 2024-12-12T05:38:57,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981997418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981997419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981997421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981997423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981997418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T05:38:57,532 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T05:38:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,686 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T05:38:57,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:57,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:57,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/eeb6670c0c1848a9a69bbcc516b09e61 2024-12-12T05:38:57,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/7fd8efc5717744a89a110adc0a100cb1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7fd8efc5717744a89a110adc0a100cb1 2024-12-12T05:38:57,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981997726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981997726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981997728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981997730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:57,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981997730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7fd8efc5717744a89a110adc0a100cb1, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:38:57,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/34300d3d452445469390ea34cbfb4d6a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/34300d3d452445469390ea34cbfb4d6a 2024-12-12T05:38:57,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/34300d3d452445469390ea34cbfb4d6a, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:38:57,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/eeb6670c0c1848a9a69bbcc516b09e61 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eeb6670c0c1848a9a69bbcc516b09e61 2024-12-12T05:38:57,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eeb6670c0c1848a9a69bbcc516b09e61, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:38:57,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 61279763b720b7a9988338e6150d61c7 in 845ms, sequenceid=16, compaction requested=false 2024-12-12T05:38:57,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:57,848 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:57,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T05:38:57,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:57,850 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T05:38:57,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:38:57,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:57,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:38:57,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:57,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:38:57,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:57,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/7bfc4824fbf74dc48e95b5318bae7890 is 50, key is test_row_0/A:col10/1733981937014/Put/seqid=0 2024-12-12T05:38:57,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741842_1018 (size=12001) 2024-12-12T05:38:57,901 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/7bfc4824fbf74dc48e95b5318bae7890 2024-12-12T05:38:57,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8fba7b57810f4d5698645b5f6862a52e is 50, key is test_row_0/B:col10/1733981937014/Put/seqid=0 2024-12-12T05:38:57,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741843_1019 (size=12001) 2024-12-12T05:38:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T05:38:57,988 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8fba7b57810f4d5698645b5f6862a52e 2024-12-12T05:38:58,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/2bd6b26e839e4882ad24a41a073973c9 is 50, key is test_row_0/C:col10/1733981937014/Put/seqid=0 2024-12-12T05:38:58,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741844_1020 (size=12001) 2024-12-12T05:38:58,074 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T05:38:58,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T05:38:58,220 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-12T05:38:58,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-12T05:38:58,223 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-12T05:38:58,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T05:38:58,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase 
RegionObservers 2024-12-12T05:38:58,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-12T05:38:58,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T05:38:58,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T05:38:58,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T05:38:58,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:58,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:58,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981998254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981998261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981998261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981998265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981998265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981998367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981998370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981998372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981998374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981998374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,421 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/2bd6b26e839e4882ad24a41a073973c9 2024-12-12T05:38:58,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/7bfc4824fbf74dc48e95b5318bae7890 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7bfc4824fbf74dc48e95b5318bae7890 2024-12-12T05:38:58,451 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7bfc4824fbf74dc48e95b5318bae7890, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T05:38:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8fba7b57810f4d5698645b5f6862a52e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8fba7b57810f4d5698645b5f6862a52e 2024-12-12T05:38:58,468 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8fba7b57810f4d5698645b5f6862a52e, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T05:38:58,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/2bd6b26e839e4882ad24a41a073973c9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/2bd6b26e839e4882ad24a41a073973c9 2024-12-12T05:38:58,493 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/2bd6b26e839e4882ad24a41a073973c9, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T05:38:58,495 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 61279763b720b7a9988338e6150d61c7 in 645ms, sequenceid=38, compaction requested=false 2024-12-12T05:38:58,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:58,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:38:58,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-12T05:38:58,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-12T05:38:58,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-12T05:38:58,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6290 sec 2024-12-12T05:38:58,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.6510 sec 2024-12-12T05:38:58,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:58,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T05:38:58,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:38:58,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:58,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:38:58,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:38:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:58,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/cb9cb08bebde4a859ffa9820e89d1438 is 50, key is test_row_0/A:col10/1733981938260/Put/seqid=0 2024-12-12T05:38:58,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741845_1021 (size=14341) 2024-12-12T05:38:58,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/cb9cb08bebde4a859ffa9820e89d1438 2024-12-12T05:38:58,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981998616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981998617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981998625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981998626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981998626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/83cd2c1cf4ae4a889004783760078328 is 50, key is test_row_0/B:col10/1733981938260/Put/seqid=0 2024-12-12T05:38:58,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741846_1022 (size=12001) 2024-12-12T05:38:58,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/83cd2c1cf4ae4a889004783760078328 2024-12-12T05:38:58,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/57889e3fc4ad453cb4acff90de1035bb is 50, key is test_row_0/C:col10/1733981938260/Put/seqid=0 2024-12-12T05:38:58,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741847_1023 (size=12001) 2024-12-12T05:38:58,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/57889e3fc4ad453cb4acff90de1035bb 2024-12-12T05:38:58,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981998728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981998730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981998732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981998733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981998733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/cb9cb08bebde4a859ffa9820e89d1438 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cb9cb08bebde4a859ffa9820e89d1438 2024-12-12T05:38:58,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cb9cb08bebde4a859ffa9820e89d1438, entries=200, sequenceid=54, filesize=14.0 K 2024-12-12T05:38:58,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/83cd2c1cf4ae4a889004783760078328 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/83cd2c1cf4ae4a889004783760078328 2024-12-12T05:38:58,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/83cd2c1cf4ae4a889004783760078328, entries=150, sequenceid=54, filesize=11.7 K 2024-12-12T05:38:58,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/57889e3fc4ad453cb4acff90de1035bb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57889e3fc4ad453cb4acff90de1035bb 2024-12-12T05:38:58,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57889e3fc4ad453cb4acff90de1035bb, entries=150, sequenceid=54, filesize=11.7 K 2024-12-12T05:38:58,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=127.47 KB/130530 for 61279763b720b7a9988338e6150d61c7 in 216ms, sequenceid=54, compaction requested=true 2024-12-12T05:38:58,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:58,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:38:58,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:38:58,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:38:58,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:38:58,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:38:58,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:38:58,792 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:38:58,792 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:38:58,796 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:38:58,796 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:38:58,798 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:38:58,798 DEBUG 
[RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:38:58,798 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:58,798 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:58,799 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7fd8efc5717744a89a110adc0a100cb1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7bfc4824fbf74dc48e95b5318bae7890, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cb9cb08bebde4a859ffa9820e89d1438] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=37.4 K 2024-12-12T05:38:58,799 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/34300d3d452445469390ea34cbfb4d6a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8fba7b57810f4d5698645b5f6862a52e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/83cd2c1cf4ae4a889004783760078328] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.2 K 2024-12-12T05:38:58,801 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd8efc5717744a89a110adc0a100cb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733981936916 2024-12-12T05:38:58,801 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 34300d3d452445469390ea34cbfb4d6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733981936916 2024-12-12T05:38:58,802 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bfc4824fbf74dc48e95b5318bae7890, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733981937014 2024-12-12T05:38:58,805 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fba7b57810f4d5698645b5f6862a52e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733981937014 2024-12-12T05:38:58,805 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb9cb08bebde4a859ffa9820e89d1438, 
keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733981938246 2024-12-12T05:38:58,807 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 83cd2c1cf4ae4a889004783760078328, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733981938254 2024-12-12T05:38:58,853 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#9 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:38:58,854 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/825d981f56644557b214efb2caa1132c is 50, key is test_row_0/B:col10/1733981938260/Put/seqid=0 2024-12-12T05:38:58,855 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#10 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:38:58,856 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1c364734fe5a455a8c013596fb07fef5 is 50, key is test_row_0/A:col10/1733981938260/Put/seqid=0 2024-12-12T05:38:58,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741848_1024 (size=12104) 2024-12-12T05:38:58,884 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/825d981f56644557b214efb2caa1132c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/825d981f56644557b214efb2caa1132c 2024-12-12T05:38:58,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741849_1025 (size=12104) 2024-12-12T05:38:58,907 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 825d981f56644557b214efb2caa1132c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
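The RegionTooBusyException warnings that recur throughout this stretch of the log are the region server refusing writes while the region's memstore is above its blocking limit, reported here as 512.0 K. That limit is not spelled out anywhere in this log; in stock HBase it is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the test presumably lowers it so that flushes and compactions are exercised quickly. The short Java sketch below is not part of the test output; it only illustrates, with assumed values, how those two settings combine into the 512 K figure seen in the exceptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        // Illustrative values only; the actual settings used by TestAcidGuarantees are not shown in this log.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // per-region memstore flush threshold
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // writes are rejected above flushSize * multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        // 131072 * 4 = 524288 bytes, i.e. the "Over memstore limit=512.0 K" reported in the warnings above.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}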
2024-12-12T05:38:58,907 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:58,908 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981938792; duration=0sec 2024-12-12T05:38:58,908 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:38:58,909 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:38:58,909 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:38:58,918 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:38:58,918 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:38:58,918 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:58,919 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eeb6670c0c1848a9a69bbcc516b09e61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/2bd6b26e839e4882ad24a41a073973c9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57889e3fc4ad453cb4acff90de1035bb] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.2 K 2024-12-12T05:38:58,919 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1c364734fe5a455a8c013596fb07fef5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1c364734fe5a455a8c013596fb07fef5 2024-12-12T05:38:58,921 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting eeb6670c0c1848a9a69bbcc516b09e61, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733981936916 2024-12-12T05:38:58,925 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bd6b26e839e4882ad24a41a073973c9, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733981937014 2024-12-12T05:38:58,927 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 57889e3fc4ad453cb4acff90de1035bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733981938254 2024-12-12T05:38:58,934 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 1c364734fe5a455a8c013596fb07fef5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:38:58,935 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:58,935 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981938791; duration=0sec 2024-12-12T05:38:58,935 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:38:58,935 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:38:58,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:38:58,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:38:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:58,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:38:58,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:58,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:38:58,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:58,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d433d93dd1dc43338ab34de56047dfbc is 50, key is test_row_0/A:col10/1733981938622/Put/seqid=0 2024-12-12T05:38:58,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981998962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981998968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981998970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981998971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:58,980 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#12 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:38:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981998972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:58,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T05:38:58,982 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-12T05:38:58,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:38:58,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-12T05:38:58,988 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/7cb0303fe8524b67be4595951fa85546 is 50, key is test_row_0/C:col10/1733981938260/Put/seqid=0 2024-12-12T05:38:58,990 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:38:58,993 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:38:58,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:38:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T05:38:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741850_1026 (size=16681) 2024-12-12T05:38:58,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d433d93dd1dc43338ab34de56047dfbc 2024-12-12T05:38:59,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741851_1027 (size=12104) 2024-12-12T05:38:59,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/a27bbf8f0f6a412ba848fe808ee17ebd is 50, key is 
test_row_0/B:col10/1733981938622/Put/seqid=0 2024-12-12T05:38:59,030 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/7cb0303fe8524b67be4595951fa85546 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/7cb0303fe8524b67be4595951fa85546 2024-12-12T05:38:59,048 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 7cb0303fe8524b67be4595951fa85546(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:38:59,048 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:59,048 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981938792; duration=0sec 2024-12-12T05:38:59,048 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:38:59,048 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:38:59,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741852_1028 (size=12001) 2024-12-12T05:38:59,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981999074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981999080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981999081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981999082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981999082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T05:38:59,146 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:38:59,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:59,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981999279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981999287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981999289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981999287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981999289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T05:38:59,300 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:38:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/a27bbf8f0f6a412ba848fe808ee17ebd 2024-12-12T05:38:59,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:38:59,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:59,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:38:59,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3b8a0ad771f14bd19ef5bd32141f1472 is 50, key is test_row_0/C:col10/1733981938622/Put/seqid=0 2024-12-12T05:38:59,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741853_1029 (size=12001) 2024-12-12T05:38:59,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3b8a0ad771f14bd19ef5bd32141f1472 2024-12-12T05:38:59,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d433d93dd1dc43338ab34de56047dfbc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d433d93dd1dc43338ab34de56047dfbc 2024-12-12T05:38:59,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d433d93dd1dc43338ab34de56047dfbc, entries=250, sequenceid=80, filesize=16.3 K 2024-12-12T05:38:59,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/a27bbf8f0f6a412ba848fe808ee17ebd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/a27bbf8f0f6a412ba848fe808ee17ebd 2024-12-12T05:38:59,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/a27bbf8f0f6a412ba848fe808ee17ebd, entries=150, sequenceid=80, filesize=11.7 K 2024-12-12T05:38:59,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3b8a0ad771f14bd19ef5bd32141f1472 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3b8a0ad771f14bd19ef5bd32141f1472 2024-12-12T05:38:59,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3b8a0ad771f14bd19ef5bd32141f1472, entries=150, sequenceid=80, filesize=11.7 K 2024-12-12T05:38:59,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 61279763b720b7a9988338e6150d61c7 in 622ms, sequenceid=80, compaction requested=false 2024-12-12T05:38:59,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:38:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:38:59,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:38:59,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:38:59,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:59,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:38:59,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:59,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:38:59,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:38:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T05:38:59,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/41b59c3b0bd3496893ab7d34c15c4433 is 50, key is test_row_0/A:col10/1733981938967/Put/seqid=0 2024-12-12T05:38:59,612 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:38:59,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:59,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741854_1030 (size=12001) 2024-12-12T05:38:59,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/41b59c3b0bd3496893ab7d34c15c4433 2024-12-12T05:38:59,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981999629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981999631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981999632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981999636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981999638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/647d7f752992443ea8330022a41efc10 is 50, key is test_row_0/B:col10/1733981938967/Put/seqid=0 2024-12-12T05:38:59,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741855_1031 (size=12001) 2024-12-12T05:38:59,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981999741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981999741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981999741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981999742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981999742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,768 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:38:59,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:59,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,924 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:38:59,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:38:59,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:38:59,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:38:59,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733981999946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733981999946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733981999947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733981999947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:38:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:38:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733981999947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/647d7f752992443ea8330022a41efc10 2024-12-12T05:39:00,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:39:00,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:00,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:00,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:00,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:00,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:00,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:00,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/1aca729a96184f12b9cdb840b0fa0bb5 is 50, key is test_row_0/C:col10/1733981938967/Put/seqid=0 2024-12-12T05:39:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T05:39:00,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741856_1032 (size=12001) 2024-12-12T05:39:00,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/1aca729a96184f12b9cdb840b0fa0bb5 2024-12-12T05:39:00,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/41b59c3b0bd3496893ab7d34c15c4433 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/41b59c3b0bd3496893ab7d34c15c4433 2024-12-12T05:39:00,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/41b59c3b0bd3496893ab7d34c15c4433, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T05:39:00,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/647d7f752992443ea8330022a41efc10 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/647d7f752992443ea8330022a41efc10 2024-12-12T05:39:00,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/647d7f752992443ea8330022a41efc10, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T05:39:00,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/1aca729a96184f12b9cdb840b0fa0bb5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/1aca729a96184f12b9cdb840b0fa0bb5 2024-12-12T05:39:00,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/1aca729a96184f12b9cdb840b0fa0bb5, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T05:39:00,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 61279763b720b7a9988338e6150d61c7 in 586ms, sequenceid=95, compaction requested=true 2024-12-12T05:39:00,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:00,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:00,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:00,175 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:00,175 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:00,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:00,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:00,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:00,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:00,177 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:00,177 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:00,177 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:00,177 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:00,178 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:00,178 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:00,178 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1c364734fe5a455a8c013596fb07fef5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d433d93dd1dc43338ab34de56047dfbc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/41b59c3b0bd3496893ab7d34c15c4433] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=39.8 K 2024-12-12T05:39:00,178 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/825d981f56644557b214efb2caa1132c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/a27bbf8f0f6a412ba848fe808ee17ebd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/647d7f752992443ea8330022a41efc10] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.3 K 2024-12-12T05:39:00,179 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 825d981f56644557b214efb2caa1132c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733981938254 2024-12-12T05:39:00,179 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
1c364734fe5a455a8c013596fb07fef5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733981938254 2024-12-12T05:39:00,180 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a27bbf8f0f6a412ba848fe808ee17ebd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733981938613 2024-12-12T05:39:00,181 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d433d93dd1dc43338ab34de56047dfbc, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733981938613 2024-12-12T05:39:00,182 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41b59c3b0bd3496893ab7d34c15c4433, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733981938967 2024-12-12T05:39:00,182 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 647d7f752992443ea8330022a41efc10, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733981938967 2024-12-12T05:39:00,211 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:00,212 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/330dab8bc3bf4aa2afe9bf36c2aedf61 is 50, key is test_row_0/A:col10/1733981938967/Put/seqid=0 2024-12-12T05:39:00,213 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:00,213 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/10b24a813186451391712b8c33ce72d0 is 50, key is test_row_0/B:col10/1733981938967/Put/seqid=0 2024-12-12T05:39:00,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T05:39:00,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:00,236 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T05:39:00,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:00,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:00,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:00,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:00,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:00,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:00,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741857_1033 (size=12207) 2024-12-12T05:39:00,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
as already flushing 2024-12-12T05:39:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:00,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1e3bab22b0c04e2cafecac8a4df240a0 is 50, key is test_row_0/A:col10/1733981939635/Put/seqid=0 2024-12-12T05:39:00,265 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/10b24a813186451391712b8c33ce72d0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/10b24a813186451391712b8c33ce72d0 2024-12-12T05:39:00,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741858_1034 (size=12207) 2024-12-12T05:39:00,300 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/330dab8bc3bf4aa2afe9bf36c2aedf61 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/330dab8bc3bf4aa2afe9bf36c2aedf61 2024-12-12T05:39:00,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982000269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982000271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982000296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982000301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982000298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,305 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 10b24a813186451391712b8c33ce72d0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:00,306 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:00,306 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981940175; duration=0sec 2024-12-12T05:39:00,306 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:00,306 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:00,306 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:00,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741859_1035 (size=12001) 2024-12-12T05:39:00,310 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:00,310 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:00,310 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:00,311 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/7cb0303fe8524b67be4595951fa85546, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3b8a0ad771f14bd19ef5bd32141f1472, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/1aca729a96184f12b9cdb840b0fa0bb5] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.3 K 2024-12-12T05:39:00,312 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cb0303fe8524b67be4595951fa85546, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733981938254 2024-12-12T05:39:00,313 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b8a0ad771f14bd19ef5bd32141f1472, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733981938613 2024-12-12T05:39:00,314 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1e3bab22b0c04e2cafecac8a4df240a0 2024-12-12T05:39:00,315 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 330dab8bc3bf4aa2afe9bf36c2aedf61(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:00,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:00,315 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981940175; duration=0sec 2024-12-12T05:39:00,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:00,316 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:00,317 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1aca729a96184f12b9cdb840b0fa0bb5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733981938967 2024-12-12T05:39:00,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/c9c0c3efe03a4cdc843bcacb955108af is 50, key is test_row_0/B:col10/1733981939635/Put/seqid=0 2024-12-12T05:39:00,349 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#22 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:00,349 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/34e146b086f14b779c0cb22250031415 is 50, key is test_row_0/C:col10/1733981938967/Put/seqid=0 2024-12-12T05:39:00,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741860_1036 (size=12001) 2024-12-12T05:39:00,367 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/c9c0c3efe03a4cdc843bcacb955108af 2024-12-12T05:39:00,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/255c1fd49ae74df4a9e05b376a49c1b1 is 50, key is test_row_0/C:col10/1733981939635/Put/seqid=0 2024-12-12T05:39:00,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741861_1037 (size=12207) 2024-12-12T05:39:00,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982000402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982000404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,409 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/34e146b086f14b779c0cb22250031415 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/34e146b086f14b779c0cb22250031415 2024-12-12T05:39:00,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982000407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982000408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982000407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,424 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 34e146b086f14b779c0cb22250031415(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:00,424 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:00,424 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981940176; duration=0sec 2024-12-12T05:39:00,424 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:00,424 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:00,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741862_1038 (size=12001) 2024-12-12T05:39:00,428 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/255c1fd49ae74df4a9e05b376a49c1b1 2024-12-12T05:39:00,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1e3bab22b0c04e2cafecac8a4df240a0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1e3bab22b0c04e2cafecac8a4df240a0 2024-12-12T05:39:00,453 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1e3bab22b0c04e2cafecac8a4df240a0, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T05:39:00,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/c9c0c3efe03a4cdc843bcacb955108af as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/c9c0c3efe03a4cdc843bcacb955108af 2024-12-12T05:39:00,468 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/c9c0c3efe03a4cdc843bcacb955108af, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T05:39:00,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/255c1fd49ae74df4a9e05b376a49c1b1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/255c1fd49ae74df4a9e05b376a49c1b1 2024-12-12T05:39:00,487 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/255c1fd49ae74df4a9e05b376a49c1b1, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T05:39:00,488 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 61279763b720b7a9988338e6150d61c7 in 252ms, sequenceid=117, compaction requested=false 2024-12-12T05:39:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:00,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:00,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-12T05:39:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-12T05:39:00,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-12T05:39:00,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4970 sec 2024-12-12T05:39:00,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.5090 sec 2024-12-12T05:39:00,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:00,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T05:39:00,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:00,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:00,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:00,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:00,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:00,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:00,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1182e0ada64d419dad86942751fdf511 is 50, key is test_row_0/A:col10/1733981940607/Put/seqid=0 2024-12-12T05:39:00,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741863_1039 (size=12101) 2024-12-12T05:39:00,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982000644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982000646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982000648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982000648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982000650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982000752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982000752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982000753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982000755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982000755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982000956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982000956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982000959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982000960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982000963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1182e0ada64d419dad86942751fdf511 2024-12-12T05:39:01,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/5d85011d43ac48a69d03e33e62e0fb42 is 50, key is test_row_0/B:col10/1733981940607/Put/seqid=0 2024-12-12T05:39:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741864_1040 (size=12101) 2024-12-12T05:39:01,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/5d85011d43ac48a69d03e33e62e0fb42 2024-12-12T05:39:01,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/cc5536071b414cfa97afe5b40b6afda7 is 50, key is test_row_0/C:col10/1733981940607/Put/seqid=0 2024-12-12T05:39:01,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T05:39:01,101 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-12T05:39:01,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:01,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-12T05:39:01,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T05:39:01,106 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:01,107 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:01,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:01,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741865_1041 (size=12101) 2024-12-12T05:39:01,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/cc5536071b414cfa97afe5b40b6afda7 2024-12-12T05:39:01,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1182e0ada64d419dad86942751fdf511 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1182e0ada64d419dad86942751fdf511 2024-12-12T05:39:01,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1182e0ada64d419dad86942751fdf511, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:39:01,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/5d85011d43ac48a69d03e33e62e0fb42 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/5d85011d43ac48a69d03e33e62e0fb42 2024-12-12T05:39:01,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/5d85011d43ac48a69d03e33e62e0fb42, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:39:01,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/cc5536071b414cfa97afe5b40b6afda7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/cc5536071b414cfa97afe5b40b6afda7 2024-12-12T05:39:01,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/cc5536071b414cfa97afe5b40b6afda7, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:39:01,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 61279763b720b7a9988338e6150d61c7 in 578ms, sequenceid=135, compaction requested=true 2024-12-12T05:39:01,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:01,187 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:01,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:01,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:01,188 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:01,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:01,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:01,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:01,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:01,189 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:01,189 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:01,189 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:01,189 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/330dab8bc3bf4aa2afe9bf36c2aedf61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1e3bab22b0c04e2cafecac8a4df240a0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1182e0ada64d419dad86942751fdf511] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.5 K 2024-12-12T05:39:01,190 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 330dab8bc3bf4aa2afe9bf36c2aedf61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733981938967 2024-12-12T05:39:01,191 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:01,191 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:01,191 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:01,191 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/10b24a813186451391712b8c33ce72d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/c9c0c3efe03a4cdc843bcacb955108af, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/5d85011d43ac48a69d03e33e62e0fb42] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.5 K 2024-12-12T05:39:01,192 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e3bab22b0c04e2cafecac8a4df240a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733981939627 2024-12-12T05:39:01,192 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 10b24a813186451391712b8c33ce72d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733981938967 2024-12-12T05:39:01,192 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1182e0ada64d419dad86942751fdf511, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981940271 2024-12-12T05:39:01,192 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c9c0c3efe03a4cdc843bcacb955108af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733981939627 2024-12-12T05:39:01,194 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d85011d43ac48a69d03e33e62e0fb42, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981940271 2024-12-12T05:39:01,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T05:39:01,217 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#28 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:01,218 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/731c2d75e4e441088480b4d85d825793 is 50, key is test_row_0/B:col10/1733981940607/Put/seqid=0 2024-12-12T05:39:01,220 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#27 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:01,222 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/f4fd0e00b98848ebbd75dbf6e05436ea is 50, key is test_row_0/A:col10/1733981940607/Put/seqid=0 2024-12-12T05:39:01,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741866_1042 (size=12409) 2024-12-12T05:39:01,259 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/731c2d75e4e441088480b4d85d825793 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/731c2d75e4e441088480b4d85d825793 2024-12-12T05:39:01,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T05:39:01,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:01,262 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T05:39:01,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:01,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741867_1043 (size=12409) 2024-12-12T05:39:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:01,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] 
regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:01,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:01,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc is 50, key is test_row_0/A:col10/1733981940646/Put/seqid=0 2024-12-12T05:39:01,276 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 731c2d75e4e441088480b4d85d825793(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:01,276 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:01,276 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981941188; duration=0sec 2024-12-12T05:39:01,276 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:01,277 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:01,277 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:01,279 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:01,279 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:01,279 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:01,279 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/34e146b086f14b779c0cb22250031415, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/255c1fd49ae74df4a9e05b376a49c1b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/cc5536071b414cfa97afe5b40b6afda7] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.5 K 2024-12-12T05:39:01,280 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 34e146b086f14b779c0cb22250031415, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733981938967 2024-12-12T05:39:01,283 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 255c1fd49ae74df4a9e05b376a49c1b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733981939627 2024-12-12T05:39:01,284 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting cc5536071b414cfa97afe5b40b6afda7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981940271 2024-12-12T05:39:01,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982001282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982001285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741868_1044 (size=12151) 2024-12-12T05:39:01,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982001292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982001292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982001296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,303 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc 2024-12-12T05:39:01,305 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:01,305 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3f918a04391c4875a6b4a55690a15ccd is 50, key is test_row_0/C:col10/1733981940607/Put/seqid=0 2024-12-12T05:39:01,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e72aef580c2447d2953c12823cae62e5 is 50, key is test_row_0/B:col10/1733981940646/Put/seqid=0 2024-12-12T05:39:01,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741869_1045 (size=12409) 2024-12-12T05:39:01,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741870_1046 (size=12151) 2024-12-12T05:39:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982001398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982001398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982001402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982001405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982001405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T05:39:01,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982001603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982001604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982001605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982001611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982001609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,674 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/f4fd0e00b98848ebbd75dbf6e05436ea as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f4fd0e00b98848ebbd75dbf6e05436ea 2024-12-12T05:39:01,686 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into f4fd0e00b98848ebbd75dbf6e05436ea(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:01,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:01,686 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981941187; duration=0sec 2024-12-12T05:39:01,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:01,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:01,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T05:39:01,752 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3f918a04391c4875a6b4a55690a15ccd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3f918a04391c4875a6b4a55690a15ccd 2024-12-12T05:39:01,759 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e72aef580c2447d2953c12823cae62e5 2024-12-12T05:39:01,767 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 3f918a04391c4875a6b4a55690a15ccd(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:01,767 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:01,767 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981941188; duration=0sec 2024-12-12T05:39:01,768 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:01,768 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:01,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/a7b24f16b67a48d6a8a81a974509dc7d is 50, key is test_row_0/C:col10/1733981940646/Put/seqid=0 2024-12-12T05:39:01,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741871_1047 (size=12151) 2024-12-12T05:39:01,811 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/a7b24f16b67a48d6a8a81a974509dc7d 2024-12-12T05:39:01,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc 2024-12-12T05:39:01,830 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc, entries=150, sequenceid=156, filesize=11.9 K 2024-12-12T05:39:01,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.StoreScanner(992): StoreScanner already has the close lock. 
There is no need to updateReaders 2024-12-12T05:39:01,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e72aef580c2447d2953c12823cae62e5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e72aef580c2447d2953c12823cae62e5 2024-12-12T05:39:01,848 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e72aef580c2447d2953c12823cae62e5, entries=150, sequenceid=156, filesize=11.9 K 2024-12-12T05:39:01,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/a7b24f16b67a48d6a8a81a974509dc7d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a7b24f16b67a48d6a8a81a974509dc7d 2024-12-12T05:39:01,865 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a7b24f16b67a48d6a8a81a974509dc7d, entries=150, sequenceid=156, filesize=11.9 K 2024-12-12T05:39:01,866 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 61279763b720b7a9988338e6150d61c7 in 604ms, sequenceid=156, compaction requested=false 2024-12-12T05:39:01,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:01,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:01,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-12T05:39:01,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-12T05:39:01,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-12T05:39:01,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 762 msec 2024-12-12T05:39:01,876 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 772 msec 2024-12-12T05:39:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:01,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T05:39:01,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:01,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:01,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:01,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:01,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:01,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:01,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/45d01d7d3543467d9ee7bba23cbb2c4e is 50, key is test_row_0/A:col10/1733981941288/Put/seqid=0 2024-12-12T05:39:01,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982001934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982001934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982001936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982001937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:01,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982001938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:01,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741872_1048 (size=12151) 2024-12-12T05:39:01,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/45d01d7d3543467d9ee7bba23cbb2c4e 2024-12-12T05:39:01,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/ca6db3ab439e49fa85f7a3f8105b44a6 is 50, key is test_row_0/B:col10/1733981941288/Put/seqid=0 2024-12-12T05:39:01,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741873_1049 (size=12151) 2024-12-12T05:39:01,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/ca6db3ab439e49fa85f7a3f8105b44a6 2024-12-12T05:39:02,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/069b97931b5b45619d9742dac3068330 is 50, key is test_row_0/C:col10/1733981941288/Put/seqid=0 2024-12-12T05:39:02,015 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741874_1050 (size=12151) 2024-12-12T05:39:02,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/069b97931b5b45619d9742dac3068330 2024-12-12T05:39:02,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/45d01d7d3543467d9ee7bba23cbb2c4e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/45d01d7d3543467d9ee7bba23cbb2c4e 2024-12-12T05:39:02,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982002040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982002040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982002041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982002042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/45d01d7d3543467d9ee7bba23cbb2c4e, entries=150, sequenceid=175, filesize=11.9 K 2024-12-12T05:39:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/ca6db3ab439e49fa85f7a3f8105b44a6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ca6db3ab439e49fa85f7a3f8105b44a6 2024-12-12T05:39:02,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982002045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ca6db3ab439e49fa85f7a3f8105b44a6, entries=150, sequenceid=175, filesize=11.9 K 2024-12-12T05:39:02,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/069b97931b5b45619d9742dac3068330 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/069b97931b5b45619d9742dac3068330 2024-12-12T05:39:02,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/069b97931b5b45619d9742dac3068330, entries=150, sequenceid=175, filesize=11.9 K 2024-12-12T05:39:02,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 61279763b720b7a9988338e6150d61c7 in 160ms, sequenceid=175, compaction requested=true 2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:02,070 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:02,070 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
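The repeated RegionTooBusyException warnings above come from HRegion.checkResources: once the region's memstore passes its blocking limit (512.0 K under this test's configuration), new mutations are rejected until a flush frees space. With default settings the HBase client retries this exception internally and it may only surface after retries are exhausted; the sketch below shows the equivalent application-level backoff around a single Put. The table name, row, family and qualifier match the log; the value, retry count and sleep times are arbitrary illustration values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break; // mutation accepted
        } catch (RegionTooBusyException e) {
          // Memstore is above the blocking limit; wait for a flush to drain it, then retry.
          if (++attempts >= 10) {
            throw e;
          }
          Thread.sleep(200L * attempts); // simple linear backoff
        }
      }
    }
  }
}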
2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:02,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:02,072 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:02,072 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:02,072 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:02,072 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:02,072 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:02,072 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:02,072 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/731c2d75e4e441088480b4d85d825793, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e72aef580c2447d2953c12823cae62e5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ca6db3ab439e49fa85f7a3f8105b44a6] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.9 K 2024-12-12T05:39:02,072 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f4fd0e00b98848ebbd75dbf6e05436ea, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/45d01d7d3543467d9ee7bba23cbb2c4e] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.9 K 2024-12-12T05:39:02,072 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 731c2d75e4e441088480b4d85d825793, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981940271 2024-12-12T05:39:02,073 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4fd0e00b98848ebbd75dbf6e05436ea, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981940271 2024-12-12T05:39:02,073 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e72aef580c2447d2953c12823cae62e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733981940645 2024-12-12T05:39:02,074 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1ce93b5dfe04a5d8c0dc9bb48b82adc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733981940645 2024-12-12T05:39:02,074 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ca6db3ab439e49fa85f7a3f8105b44a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733981941288 2024-12-12T05:39:02,074 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45d01d7d3543467d9ee7bba23cbb2c4e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733981941288 2024-12-12T05:39:02,093 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#36 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:02,094 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d6e0ab29e6a34096a505ac485098d66a is 50, key is test_row_0/A:col10/1733981941288/Put/seqid=0 2024-12-12T05:39:02,097 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:02,098 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b6f84380b81147f8be1250d923faa71f is 50, key is test_row_0/B:col10/1733981941288/Put/seqid=0 2024-12-12T05:39:02,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741875_1051 (size=12561) 2024-12-12T05:39:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741876_1052 (size=12561) 2024-12-12T05:39:02,140 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b6f84380b81147f8be1250d923faa71f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b6f84380b81147f8be1250d923faa71f 2024-12-12T05:39:02,149 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into b6f84380b81147f8be1250d923faa71f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
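The compaction entries above show the server-side selection path: the exploring policy picked all three flush files (totalSize=35.9 K) for each store and rewrote them into a single ~12.3 K file. Here the compactions were queued automatically by MemStoreFlusher, but the same work can be requested from a client, which is sometimes useful when reproducing this kind of scenario. A minimal sketch with the Admin API follows; note that these calls only enqueue a request, and when it actually runs still depends on the region server's compaction queues.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Request a compaction of a single column family (B here), ...
      admin.compact(table, Bytes.toBytes("B"));
      // ... or request a major compaction of the whole table.
      admin.majorCompact(table);
    }
  }
}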
2024-12-12T05:39:02,149 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:02,149 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981942070; duration=0sec 2024-12-12T05:39:02,149 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:02,149 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:02,149 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:02,151 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:02,151 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:02,153 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:02,153 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3f918a04391c4875a6b4a55690a15ccd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a7b24f16b67a48d6a8a81a974509dc7d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/069b97931b5b45619d9742dac3068330] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=35.9 K 2024-12-12T05:39:02,156 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f918a04391c4875a6b4a55690a15ccd, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981940271 2024-12-12T05:39:02,157 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a7b24f16b67a48d6a8a81a974509dc7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733981940645 2024-12-12T05:39:02,158 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 069b97931b5b45619d9742dac3068330, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733981941288 2024-12-12T05:39:02,175 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
61279763b720b7a9988338e6150d61c7#C#compaction#38 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:02,176 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/ac7cb0ab3c5d420ba366ceb2cd61db32 is 50, key is test_row_0/C:col10/1733981941288/Put/seqid=0 2024-12-12T05:39:02,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741877_1053 (size=12561) 2024-12-12T05:39:02,211 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/ac7cb0ab3c5d420ba366ceb2cd61db32 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac7cb0ab3c5d420ba366ceb2cd61db32 2024-12-12T05:39:02,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T05:39:02,214 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-12T05:39:02,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:02,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-12T05:39:02,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T05:39:02,225 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:02,227 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:02,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:02,230 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into ac7cb0ab3c5d420ba366ceb2cd61db32(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
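The HMaster$22(4386) entry above is the client-initiated flush that becomes FlushTableProcedure pid=18 with a FlushRegionProcedure subprocedure, mirroring the pid=16/17 pair seen earlier; the HBaseAdmin$TableFuture line shows the client waiting until the procedure completes. From the client's side this is a single Admin call, sketched below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush all memstores of the test table; in this build the master runs the
      // request as a FlushTableProcedure, as the procedure log entries above show.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}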
2024-12-12T05:39:02,233 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:02,233 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981942070; duration=0sec 2024-12-12T05:39:02,233 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:02,233 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:02,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:02,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T05:39:02,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:02,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:02,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:02,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:02,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:02,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:02,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982002266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982002268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/edd1893ea65c4998a98c481888eb9d20 is 50, key is test_row_0/A:col10/1733981942248/Put/seqid=0 2024-12-12T05:39:02,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982002270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982002272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982002272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741878_1054 (size=12151) 2024-12-12T05:39:02,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T05:39:02,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982002374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982002374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:02,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982002378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:02,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982002381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-12T05:39:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing
2024-12-12T05:39:02,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=19
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982002387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-12T05:39:02,526 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d6e0ab29e6a34096a505ac485098d66a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d6e0ab29e6a34096a505ac485098d66a
2024-12-12T05:39:02,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-12T05:39:02,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing
2024-12-12T05:39:02,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=19
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,541 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into d6e0ab29e6a34096a505ac485098d66a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T05:39:02,541 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7:
2024-12-12T05:39:02,541 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981942070; duration=0sec
2024-12-12T05:39:02,543 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:39:02,543 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A
2024-12-12T05:39:02,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982002577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982002579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982002581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982002583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982002589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/edd1893ea65c4998a98c481888eb9d20
2024-12-12T05:39:02,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-12T05:39:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing
2024-12-12T05:39:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=19
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/04992170a7c34c679a4291aab1ff977b is 50, key is test_row_0/B:col10/1733981942248/Put/seqid=0
2024-12-12T05:39:02,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741879_1055 (size=12151)
2024-12-12T05:39:02,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-12T05:39:02,846 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-12T05:39:02,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing
2024-12-12T05:39:02,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:02,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=19
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:02,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982002879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982002883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982002885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982002885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:02,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982002894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:03,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:03,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-12T05:39:03,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:03,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing
2024-12-12T05:39:03,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:03,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:03,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:03,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=19
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:03,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/04992170a7c34c679a4291aab1ff977b
2024-12-12T05:39:03,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/f5a1ec1fe29749b59c386313d65fb30c is 50, key is test_row_0/C:col10/1733981942248/Put/seqid=0
2024-12-12T05:39:03,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741880_1056 (size=12151)
2024-12-12T05:39:03,156 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:03,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/f5a1ec1fe29749b59c386313d65fb30c
2024-12-12T05:39:03,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-12T05:39:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing
2024-12-12T05:39:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:03,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19
java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:03,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:03,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/edd1893ea65c4998a98c481888eb9d20 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/edd1893ea65c4998a98c481888eb9d20 2024-12-12T05:39:03,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:03,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/edd1893ea65c4998a98c481888eb9d20, entries=150, sequenceid=200, filesize=11.9 K 2024-12-12T05:39:03,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/04992170a7c34c679a4291aab1ff977b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04992170a7c34c679a4291aab1ff977b 2024-12-12T05:39:03,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04992170a7c34c679a4291aab1ff977b, entries=150, sequenceid=200, filesize=11.9 K 2024-12-12T05:39:03,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/f5a1ec1fe29749b59c386313d65fb30c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f5a1ec1fe29749b59c386313d65fb30c 2024-12-12T05:39:03,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f5a1ec1fe29749b59c386313d65fb30c, entries=150, sequenceid=200, filesize=11.9 K 
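The pid=19 entries above show the master-driven flush racing the regionserver's own MemStoreFlusher: the remote FlushRegionCallable keeps failing with "Unable to complete flush" and the region reports "NOT flushing ... as already flushing", while the background flusher commits the sequenceid=200 store files for families A and B. For context, this is the kind of flush normally requested through the public Admin API; the sketch below is only an illustration (the standalone class and default connection setup are assumptions, not code from the test), reusing the table name that appears in the log.

```java
// Minimal sketch, assuming a reachable cluster and default client configuration.
// Admin.flush submits a FlushTableProcedure on the master, which fans out one
// FlushRegionProcedure per region; the pid=18/pid=19 entries in this log are the
// server-side trace of exactly that kind of request.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the log; the call blocks until the master's
      // flush procedure completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Consistent with the repeated "Executing remote procedure ... pid=19" entries, the master keeps re-dispatching the per-region flush until the regionserver is no longer mid-flush and can run it to completion.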
2024-12-12T05:39:03,195 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 61279763b720b7a9988338e6150d61c7 in 945ms, sequenceid=200, compaction requested=false 2024-12-12T05:39:03,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:03,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T05:39:03,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:03,322 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:39:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:03,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:03,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T05:39:03,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/cc9f2d37dc9a41a39790e2ed27ea5816 is 50, key is test_row_0/A:col10/1733981942269/Put/seqid=0 2024-12-12T05:39:03,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741881_1057 (size=12151) 2024-12-12T05:39:03,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
as already flushing 2024-12-12T05:39:03,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:03,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982003417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982003421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982003421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982003426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982003426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982003527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982003529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982003530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982003531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982003533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982003731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982003731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982003736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982003736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:03,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982003737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:03,742 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/cc9f2d37dc9a41a39790e2ed27ea5816 2024-12-12T05:39:03,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8229038cd9fe4bd9bfd5ab45c1671635 is 50, key is test_row_0/B:col10/1733981942269/Put/seqid=0 2024-12-12T05:39:03,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741882_1058 (size=12151) 2024-12-12T05:39:04,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982004035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982004037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982004040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982004041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982004041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,182 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8229038cd9fe4bd9bfd5ab45c1671635 2024-12-12T05:39:04,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/d1cb41feddbf46f382b24cac85e4b228 is 50, key is test_row_0/C:col10/1733981942269/Put/seqid=0 2024-12-12T05:39:04,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741883_1059 (size=12151) 2024-12-12T05:39:04,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T05:39:04,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982004540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982004544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982004545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982004548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:04,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982004548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:04,610 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/d1cb41feddbf46f382b24cac85e4b228 2024-12-12T05:39:04,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/cc9f2d37dc9a41a39790e2ed27ea5816 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cc9f2d37dc9a41a39790e2ed27ea5816 2024-12-12T05:39:04,628 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cc9f2d37dc9a41a39790e2ed27ea5816, entries=150, sequenceid=215, filesize=11.9 K 2024-12-12T05:39:04,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8229038cd9fe4bd9bfd5ab45c1671635 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8229038cd9fe4bd9bfd5ab45c1671635 2024-12-12T05:39:04,638 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8229038cd9fe4bd9bfd5ab45c1671635, entries=150, sequenceid=215, filesize=11.9 K 2024-12-12T05:39:04,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/d1cb41feddbf46f382b24cac85e4b228 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d1cb41feddbf46f382b24cac85e4b228 2024-12-12T05:39:04,646 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d1cb41feddbf46f382b24cac85e4b228, entries=150, sequenceid=215, filesize=11.9 K 2024-12-12T05:39:04,648 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 61279763b720b7a9988338e6150d61c7 in 1326ms, sequenceid=215, compaction requested=true 2024-12-12T05:39:04,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:04,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:04,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-12T05:39:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-12T05:39:04,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-12T05:39:04,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4220 sec 2024-12-12T05:39:04,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.4380 sec 2024-12-12T05:39:05,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:05,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:39:05,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:05,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:05,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:05,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:05,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:05,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:05,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/40d9210cbc5543aa91dd26ad7b2f52fd is 50, key is test_row_0/A:col10/1733981945547/Put/seqid=0 2024-12-12T05:39:05,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982005565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982005569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982005569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982005570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982005571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741884_1060 (size=12151) 2024-12-12T05:39:05,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/40d9210cbc5543aa91dd26ad7b2f52fd 2024-12-12T05:39:05,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/04f6819a17694280acf4571dfec93ed4 is 50, key is test_row_0/B:col10/1733981945547/Put/seqid=0 2024-12-12T05:39:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741885_1061 (size=12151) 2024-12-12T05:39:05,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/04f6819a17694280acf4571dfec93ed4 2024-12-12T05:39:05,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/72b5d4b80fa543b39a608cd58762d9d8 is 50, key is test_row_0/C:col10/1733981945547/Put/seqid=0 2024-12-12T05:39:05,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741886_1062 (size=12151) 2024-12-12T05:39:05,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/72b5d4b80fa543b39a608cd58762d9d8 2024-12-12T05:39:05,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/40d9210cbc5543aa91dd26ad7b2f52fd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/40d9210cbc5543aa91dd26ad7b2f52fd 2024-12-12T05:39:05,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/40d9210cbc5543aa91dd26ad7b2f52fd, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T05:39:05,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/04f6819a17694280acf4571dfec93ed4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04f6819a17694280acf4571dfec93ed4 2024-12-12T05:39:05,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04f6819a17694280acf4571dfec93ed4, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T05:39:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/72b5d4b80fa543b39a608cd58762d9d8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/72b5d4b80fa543b39a608cd58762d9d8 2024-12-12T05:39:05,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982005674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/72b5d4b80fa543b39a608cd58762d9d8, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T05:39:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982005674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 61279763b720b7a9988338e6150d61c7 in 131ms, sequenceid=238, compaction requested=true 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status 
journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:05,680 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:05,680 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:05,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:05,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T05:39:05,684 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:05,684 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:05,685 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:05,685 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b6f84380b81147f8be1250d923faa71f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04992170a7c34c679a4291aab1ff977b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8229038cd9fe4bd9bfd5ab45c1671635, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04f6819a17694280acf4571dfec93ed4] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=47.9 K 2024-12-12T05:39:05,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:05,686 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b6f84380b81147f8be1250d923faa71f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733981941288 2024-12-12T05:39:05,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:05,686 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:05,686 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d6e0ab29e6a34096a505ac485098d66a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/edd1893ea65c4998a98c481888eb9d20, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cc9f2d37dc9a41a39790e2ed27ea5816, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/40d9210cbc5543aa91dd26ad7b2f52fd] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=47.9 K 2024-12-12T05:39:05,686 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 04992170a7c34c679a4291aab1ff977b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733981942247 2024-12-12T05:39:05,687 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8229038cd9fe4bd9bfd5ab45c1671635, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733981942260 2024-12-12T05:39:05,687 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6e0ab29e6a34096a505ac485098d66a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733981941288 2024-12-12T05:39:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:05,688 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting edd1893ea65c4998a98c481888eb9d20, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733981942247 2024-12-12T05:39:05,688 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 04f6819a17694280acf4571dfec93ed4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733981943417 2024-12-12T05:39:05,689 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc9f2d37dc9a41a39790e2ed27ea5816, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733981942260 2024-12-12T05:39:05,691 DEBUG 
[RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40d9210cbc5543aa91dd26ad7b2f52fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733981943417 2024-12-12T05:39:05,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/f743cda6233f43b4aa55f0839598ce6d is 50, key is test_row_0/A:col10/1733981945555/Put/seqid=0 2024-12-12T05:39:05,721 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:05,722 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/91ecb1b6a2ff43cfb444ba917a98afe9 is 50, key is test_row_0/B:col10/1733981945547/Put/seqid=0 2024-12-12T05:39:05,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982005720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982005726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,729 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#50 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:05,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982005726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,730 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/5b958e0815ac4dbe815a725423743846 is 50, key is test_row_0/A:col10/1733981945547/Put/seqid=0 2024-12-12T05:39:05,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741887_1063 (size=16931) 2024-12-12T05:39:05,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741888_1064 (size=12697) 2024-12-12T05:39:05,768 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/91ecb1b6a2ff43cfb444ba917a98afe9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/91ecb1b6a2ff43cfb444ba917a98afe9 2024-12-12T05:39:05,779 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 91ecb1b6a2ff43cfb444ba917a98afe9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:05,780 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:05,780 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=12, startTime=1733981945680; duration=0sec 2024-12-12T05:39:05,780 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:05,780 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:05,780 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:05,782 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:05,783 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:05,783 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:05,783 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac7cb0ab3c5d420ba366ceb2cd61db32, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f5a1ec1fe29749b59c386313d65fb30c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d1cb41feddbf46f382b24cac85e4b228, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/72b5d4b80fa543b39a608cd58762d9d8] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=47.9 K 2024-12-12T05:39:05,783 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ac7cb0ab3c5d420ba366ceb2cd61db32, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733981941288 2024-12-12T05:39:05,784 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f5a1ec1fe29749b59c386313d65fb30c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733981942247 2024-12-12T05:39:05,784 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting d1cb41feddbf46f382b24cac85e4b228, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=215, earliestPutTs=1733981942260 2024-12-12T05:39:05,785 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 72b5d4b80fa543b39a608cd58762d9d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733981943417 2024-12-12T05:39:05,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741889_1065 (size=12697) 2024-12-12T05:39:05,797 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/5b958e0815ac4dbe815a725423743846 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/5b958e0815ac4dbe815a725423743846 2024-12-12T05:39:05,807 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 5b958e0815ac4dbe815a725423743846(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:05,807 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:05,807 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=12, startTime=1733981945680; duration=0sec 2024-12-12T05:39:05,808 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:05,808 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:05,809 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:05,810 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/eed6db4d118c4344b4fd2d949922194e is 50, key is test_row_0/C:col10/1733981945547/Put/seqid=0 2024-12-12T05:39:05,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982005827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982005831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982005831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741890_1066 (size=12697) 2024-12-12T05:39:05,847 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/eed6db4d118c4344b4fd2d949922194e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eed6db4d118c4344b4fd2d949922194e 2024-12-12T05:39:05,857 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into eed6db4d118c4344b4fd2d949922194e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:05,857 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:05,857 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=12, startTime=1733981945680; duration=0sec 2024-12-12T05:39:05,857 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:05,858 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:05,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982005880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:05,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982005881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982006029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982006034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982006034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/f743cda6233f43b4aa55f0839598ce6d 2024-12-12T05:39:06,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/3983e4d54f7443188d1f6d573f6d3b61 is 50, key is test_row_0/B:col10/1733981945555/Put/seqid=0 2024-12-12T05:39:06,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741891_1067 (size=12151) 2024-12-12T05:39:06,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/3983e4d54f7443188d1f6d573f6d3b61 2024-12-12T05:39:06,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/be198ce7ef474b50a989edfda4a4c640 is 50, key is test_row_0/C:col10/1733981945555/Put/seqid=0 2024-12-12T05:39:06,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741892_1068 (size=12151) 2024-12-12T05:39:06,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982006185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982006185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T05:39:06,328 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-12T05:39:06,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 
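The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request logged just above is what the master turns into the FlushTableProcedure (pid=20 below) with a FlushRegionProcedure child per region. On the client side this corresponds to a plain Admin.flush call; a minimal sketch using the public HBase client API, with the table name taken from the log and everything else (configuration, class name) illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Waits for the master-side flush procedure to finish; the
      // "Operation: FLUSH ... procId: NN completed" and
      // "Checking to see if procedure is done" lines in this log are that wait.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}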
2024-12-12T05:39:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-12T05:39:06,332 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T05:39:06,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982006333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,334 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:06,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:06,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982006336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982006338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T05:39:06,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T05:39:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:06,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:06,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
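The flush that eventually succeeds below logs CompactingMemStore "FLUSHING TO DISK" and CompactionPipeline "Swapping pipeline suffix" messages, which indicates these column families run with in-memory compaction (a CompactingMemStore) rather than the default memstore. A hedged sketch of declaring that at table-creation time with the public client API; the table and family names (A, B, C) match the log, while the BASIC policy and the class name are assumptions, since the policy could equally be set through cluster-wide configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateCompactingMemstoreTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // In-memory compaction selects a CompactingMemStore, whose
                // "FLUSHING TO DISK" / "Swapping pipeline suffix" messages
                // appear further down in this log.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}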
2024-12-12T05:39:06,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:06,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:06,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/be198ce7ef474b50a989edfda4a4c640 2024-12-12T05:39:06,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/f743cda6233f43b4aa55f0839598ce6d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f743cda6233f43b4aa55f0839598ce6d 2024-12-12T05:39:06,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f743cda6233f43b4aa55f0839598ce6d, entries=250, sequenceid=255, filesize=16.5 K 2024-12-12T05:39:06,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/3983e4d54f7443188d1f6d573f6d3b61 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/3983e4d54f7443188d1f6d573f6d3b61 2024-12-12T05:39:06,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/3983e4d54f7443188d1f6d573f6d3b61, entries=150, sequenceid=255, filesize=11.9 K 2024-12-12T05:39:06,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/be198ce7ef474b50a989edfda4a4c640 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/be198ce7ef474b50a989edfda4a4c640 2024-12-12T05:39:06,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/be198ce7ef474b50a989edfda4a4c640, entries=150, sequenceid=255, filesize=11.9 K 2024-12-12T05:39:06,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 61279763b720b7a9988338e6150d61c7 in 928ms, sequenceid=255, compaction requested=false 2024-12-12T05:39:06,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=20 2024-12-12T05:39:06,639 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T05:39:06,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:06,640 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T05:39:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:06,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/b6d79912ac2542a4abc9bb31be910f2a is 50, key is test_row_0/A:col10/1733981945723/Put/seqid=0 2024-12-12T05:39:06,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741893_1069 (size=12301) 2024-12-12T05:39:06,650 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/b6d79912ac2542a4abc9bb31be910f2a 2024-12-12T05:39:06,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/45883efa055c499b901d26f1635963de is 50, key is test_row_0/B:col10/1733981945723/Put/seqid=0 2024-12-12T05:39:06,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741894_1070 (size=12301) 2024-12-12T05:39:06,688 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/45883efa055c499b901d26f1635963de 2024-12-12T05:39:06,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:06,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:06,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/872d712e9c934546b8cfcdd27344aaf3 is 50, key is test_row_0/C:col10/1733981945723/Put/seqid=0 2024-12-12T05:39:06,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741895_1071 (size=12301) 2024-12-12T05:39:06,714 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/872d712e9c934546b8cfcdd27344aaf3 2024-12-12T05:39:06,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982006719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/b6d79912ac2542a4abc9bb31be910f2a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b6d79912ac2542a4abc9bb31be910f2a 2024-12-12T05:39:06,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982006722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,730 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b6d79912ac2542a4abc9bb31be910f2a, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T05:39:06,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/45883efa055c499b901d26f1635963de as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/45883efa055c499b901d26f1635963de 2024-12-12T05:39:06,739 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/45883efa055c499b901d26f1635963de, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T05:39:06,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/872d712e9c934546b8cfcdd27344aaf3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/872d712e9c934546b8cfcdd27344aaf3 2024-12-12T05:39:06,749 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/872d712e9c934546b8cfcdd27344aaf3, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T05:39:06,750 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 61279763b720b7a9988338e6150d61c7 in 110ms, sequenceid=278, 
compaction requested=true 2024-12-12T05:39:06,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:06,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:06,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-12T05:39:06,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-12T05:39:06,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-12T05:39:06,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 418 msec 2024-12-12T05:39:06,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 427 msec 2024-12-12T05:39:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:06,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T05:39:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:06,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/92fd91a4dec64ed19da24166e1161072 is 50, key is test_row_0/A:col10/1733981946825/Put/seqid=0 2024-12-12T05:39:06,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741896_1072 (size=17181) 2024-12-12T05:39:06,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/92fd91a4dec64ed19da24166e1161072 2024-12-12T05:39:06,853 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982006848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982006849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982006850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982006851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982006852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e2475c2aa0fc4bdfa594052bb077b6f3 is 50, key is test_row_0/B:col10/1733981946825/Put/seqid=0 2024-12-12T05:39:06,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741897_1073 (size=12301) 2024-12-12T05:39:06,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=20 2024-12-12T05:39:06,935 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-12T05:39:06,937 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:06,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-12T05:39:06,938 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:06,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T05:39:06,939 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:06,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:06,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982006955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982006956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982006957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982006957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:06,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:06,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982006957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T05:39:07,100 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T05:39:07,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:07,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982007160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982007162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982007162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982007163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982007163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T05:39:07,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T05:39:07,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:07,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e2475c2aa0fc4bdfa594052bb077b6f3 2024-12-12T05:39:07,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/bfc1ee0da470492493dfe59aa5751b2b is 50, key is test_row_0/C:col10/1733981946825/Put/seqid=0 2024-12-12T05:39:07,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741898_1074 (size=12301) 2024-12-12T05:39:07,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T05:39:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
as already flushing 2024-12-12T05:39:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982007462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982007465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982007467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982007467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982007467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T05:39:07,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T05:39:07,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:07,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/bfc1ee0da470492493dfe59aa5751b2b 2024-12-12T05:39:07,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/92fd91a4dec64ed19da24166e1161072 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/92fd91a4dec64ed19da24166e1161072 2024-12-12T05:39:07,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/92fd91a4dec64ed19da24166e1161072, entries=250, sequenceid=296, filesize=16.8 K 2024-12-12T05:39:07,713 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e2475c2aa0fc4bdfa594052bb077b6f3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e2475c2aa0fc4bdfa594052bb077b6f3 2024-12-12T05:39:07,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T05:39:07,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:07,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:07,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:07,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:07,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e2475c2aa0fc4bdfa594052bb077b6f3, entries=150, sequenceid=296, filesize=12.0 K 2024-12-12T05:39:07,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/bfc1ee0da470492493dfe59aa5751b2b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/bfc1ee0da470492493dfe59aa5751b2b 2024-12-12T05:39:07,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/bfc1ee0da470492493dfe59aa5751b2b, entries=150, sequenceid=296, filesize=12.0 K 2024-12-12T05:39:07,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 61279763b720b7a9988338e6150d61c7 in 913ms, sequenceid=296, compaction requested=true 2024-12-12T05:39:07,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:07,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:07,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:07,739 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:07,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:07,740 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:07,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:07,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:07,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:07,741 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:07,741 DEBUG 
[RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:07,741 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59110 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:07,741 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,741 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:07,741 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,741 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/91ecb1b6a2ff43cfb444ba917a98afe9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/3983e4d54f7443188d1f6d573f6d3b61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/45883efa055c499b901d26f1635963de, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e2475c2aa0fc4bdfa594052bb077b6f3] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=48.3 K 2024-12-12T05:39:07,742 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/5b958e0815ac4dbe815a725423743846, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f743cda6233f43b4aa55f0839598ce6d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b6d79912ac2542a4abc9bb31be910f2a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/92fd91a4dec64ed19da24166e1161072] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=57.7 K 2024-12-12T05:39:07,742 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 91ecb1b6a2ff43cfb444ba917a98afe9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733981943417 2024-12-12T05:39:07,742 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
5b958e0815ac4dbe815a725423743846, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733981943417
2024-12-12T05:39:07,742 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3983e4d54f7443188d1f6d573f6d3b61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733981945555
2024-12-12T05:39:07,742 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f743cda6233f43b4aa55f0839598ce6d, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733981945555
2024-12-12T05:39:07,743 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 45883efa055c499b901d26f1635963de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733981945717
2024-12-12T05:39:07,743 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6d79912ac2542a4abc9bb31be910f2a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733981945717
2024-12-12T05:39:07,743 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e2475c2aa0fc4bdfa594052bb077b6f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733981946717
2024-12-12T05:39:07,743 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92fd91a4dec64ed19da24166e1161072, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733981946711
2024-12-12T05:39:07,755 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-12T05:39:07,755 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T05:39:07,756 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8be5550c71c14de98de4767e5427e1a8 is 50, key is test_row_0/B:col10/1733981946825/Put/seqid=0
2024-12-12T05:39:07,756 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1ca21348768a4608ad2f8f7be53b1184 is 50, key is test_row_0/A:col10/1733981946825/Put/seqid=0
2024-12-12T05:39:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741900_1076 (size=12983)
2024-12-12T05:39:07,775 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8be5550c71c14de98de4767e5427e1a8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8be5550c71c14de98de4767e5427e1a8
2024-12-12T05:39:07,782 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 8be5550c71c14de98de4767e5427e1a8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T05:39:07,782 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:07,782 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=12, startTime=1733981947739; duration=0sec 2024-12-12T05:39:07,782 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:07,782 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:07,782 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:07,784 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:07,784 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:07,784 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,785 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eed6db4d118c4344b4fd2d949922194e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/be198ce7ef474b50a989edfda4a4c640, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/872d712e9c934546b8cfcdd27344aaf3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/bfc1ee0da470492493dfe59aa5751b2b] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=48.3 K 2024-12-12T05:39:07,785 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting eed6db4d118c4344b4fd2d949922194e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733981943417 2024-12-12T05:39:07,786 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting be198ce7ef474b50a989edfda4a4c640, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733981945555 2024-12-12T05:39:07,786 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 872d712e9c934546b8cfcdd27344aaf3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=278, earliestPutTs=1733981945717 2024-12-12T05:39:07,787 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting bfc1ee0da470492493dfe59aa5751b2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733981946717 2024-12-12T05:39:07,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741899_1075 (size=12983) 2024-12-12T05:39:07,798 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/1ca21348768a4608ad2f8f7be53b1184 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1ca21348768a4608ad2f8f7be53b1184 2024-12-12T05:39:07,805 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 1ca21348768a4608ad2f8f7be53b1184(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:07,806 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:07,806 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=12, startTime=1733981947739; duration=0sec 2024-12-12T05:39:07,806 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:07,806 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:07,806 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:07,807 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/4fafd35ce189427fae5094904229ead4 is 50, key is test_row_0/C:col10/1733981946825/Put/seqid=0 2024-12-12T05:39:07,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741901_1077 (size=12983) 2024-12-12T05:39:07,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T05:39:07,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:07,890 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T05:39:07,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:07,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:07,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:07,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:07,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:07,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:07,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/db37227e1f2045709b6f005c1cfc6947 is 50, key is test_row_0/A:col10/1733981946846/Put/seqid=0 2024-12-12T05:39:07,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741902_1078 (size=12301) 2024-12-12T05:39:07,933 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore 
data size=33.54 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/db37227e1f2045709b6f005c1cfc6947 2024-12-12T05:39:07,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/f296d6c818354c538495a152e1c84838 is 50, key is test_row_0/B:col10/1733981946846/Put/seqid=0 2024-12-12T05:39:07,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741903_1079 (size=12301) 2024-12-12T05:39:07,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:07,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982007980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982007980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982007981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982007983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:07,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982007983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T05:39:08,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982008085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982008085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982008085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982008087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982008087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,217 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/4fafd35ce189427fae5094904229ead4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/4fafd35ce189427fae5094904229ead4 2024-12-12T05:39:08,222 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 4fafd35ce189427fae5094904229ead4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:08,222 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:08,222 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=12, startTime=1733981947740; duration=0sec 2024-12-12T05:39:08,222 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:08,222 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:08,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982008287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982008287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982008291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982008291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982008292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,351 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/f296d6c818354c538495a152e1c84838 2024-12-12T05:39:08,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/591991c048ba4c2798b4536b2ad9f8bd is 50, key is test_row_0/C:col10/1733981946846/Put/seqid=0 2024-12-12T05:39:08,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741904_1080 (size=12301) 2024-12-12T05:39:08,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982008591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982008592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982008594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982008596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:08,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982008597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:08,771 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/591991c048ba4c2798b4536b2ad9f8bd 2024-12-12T05:39:08,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/db37227e1f2045709b6f005c1cfc6947 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/db37227e1f2045709b6f005c1cfc6947 2024-12-12T05:39:08,781 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/db37227e1f2045709b6f005c1cfc6947, entries=150, sequenceid=316, filesize=12.0 K 2024-12-12T05:39:08,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/f296d6c818354c538495a152e1c84838 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/f296d6c818354c538495a152e1c84838 2024-12-12T05:39:08,787 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/f296d6c818354c538495a152e1c84838, entries=150, sequenceid=316, filesize=12.0 K 2024-12-12T05:39:08,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/591991c048ba4c2798b4536b2ad9f8bd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/591991c048ba4c2798b4536b2ad9f8bd 2024-12-12T05:39:08,794 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/591991c048ba4c2798b4536b2ad9f8bd, entries=150, sequenceid=316, filesize=12.0 K 2024-12-12T05:39:08,795 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 61279763b720b7a9988338e6150d61c7 in 905ms, sequenceid=316, compaction requested=false 2024-12-12T05:39:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:08,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-12T05:39:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-12T05:39:08,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-12T05:39:08,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8570 sec 2024-12-12T05:39:08,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.8620 sec 2024-12-12T05:39:09,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T05:39:09,043 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-12T05:39:09,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:09,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-12T05:39:09,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T05:39:09,046 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:09,047 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:09,047 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:09,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:09,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T05:39:09,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:09,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:09,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:09,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-12T05:39:09,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:09,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:09,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/0faaa088982745d08045f2787ed5efb1 is 50, key is test_row_0/A:col10/1733981947982/Put/seqid=0 2024-12-12T05:39:09,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982009111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982009112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982009114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982009115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982009116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741905_1081 (size=12301) 2024-12-12T05:39:09,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T05:39:09,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:09,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:09,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982009217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982009217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982009217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982009217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982009221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T05:39:09,352 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:09,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982009419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982009420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982009420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982009421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982009423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:09,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:09,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:09,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/0faaa088982745d08045f2787ed5efb1 2024-12-12T05:39:09,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d8f2fee9bba349d2ba4b10fa280d1d8d is 50, key is test_row_0/B:col10/1733981947982/Put/seqid=0 2024-12-12T05:39:09,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741906_1082 (size=12301) 2024-12-12T05:39:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T05:39:09,657 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,658 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982009723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982009723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982009724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982009726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982009726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,810 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:09,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,811 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
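The pid=25 entries repeat because FlushRegionCallable keeps finding the region already flushing, reports "Unable to complete flush", and the master marks the remote procedure failed and re-dispatches it; the loop ends once the in-flight flush completes. A flush requested through the Admin API appears to take this master-procedure path in this build. A minimal sketch, assuming a reachable cluster; the class name is an assumption and the table name comes from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequester {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask for a flush of every region of the table; on the region server side this
          // shows up as FlushRegionCallable work items like the pid=25 entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated "Checking to see if procedure is done pid=24" lines in this stretch are consistent with a client waiting on exactly that kind of flush procedure.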
2024-12-12T05:39:09,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d8f2fee9bba349d2ba4b10fa280d1d8d 2024-12-12T05:39:09,964 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:09,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:09,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:09,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:09,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/d47d7fbc880941dea50e1acd813c2e29 is 50, key is test_row_0/C:col10/1733981947982/Put/seqid=0 2024-12-12T05:39:09,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:09,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741907_1083 (size=12301) 2024-12-12T05:39:09,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/d47d7fbc880941dea50e1acd813c2e29 2024-12-12T05:39:09,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/0faaa088982745d08045f2787ed5efb1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/0faaa088982745d08045f2787ed5efb1 2024-12-12T05:39:09,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/0faaa088982745d08045f2787ed5efb1, entries=150, sequenceid=337, filesize=12.0 K 2024-12-12T05:39:09,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d8f2fee9bba349d2ba4b10fa280d1d8d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d8f2fee9bba349d2ba4b10fa280d1d8d 2024-12-12T05:39:09,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d8f2fee9bba349d2ba4b10fa280d1d8d, entries=150, sequenceid=337, filesize=12.0 K 2024-12-12T05:39:10,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/d47d7fbc880941dea50e1acd813c2e29 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d47d7fbc880941dea50e1acd813c2e29 2024-12-12T05:39:10,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d47d7fbc880941dea50e1acd813c2e29, entries=150, sequenceid=337, filesize=12.0 K 2024-12-12T05:39:10,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 61279763b720b7a9988338e6150d61c7 in 911ms, sequenceid=337, compaction requested=true 2024-12-12T05:39:10,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:10,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,008 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:10,008 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:10,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:10,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:10,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:10,010 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:10,010 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:10,010 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
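With three HFiles now in each store, the flusher's "Add compact mark" requests system minor compactions, and ExploringCompactionPolicy finds all three files eligible (three files is also the stock hbase.hstore.compaction.min). The same work can be requested explicitly through the Admin API; in the sketch below the class name and the polling loop are assumptions, while the table and family names come from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionRequester {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Queue a minor compaction of one store (column family B), the same kind of work
          // the system request above hands to the compaction threads.
          admin.compact(table, Bytes.toBytes("B"));

          // The request is asynchronous; poll the table-level compaction state if the
          // caller needs to wait for it to drain.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(200);
          }
        }
      }
    }

admin.compact only queues the request; the shortCompactions and longCompactions threads seen above pick it up the same way they pick up the system-requested compactions.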
2024-12-12T05:39:10,010 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8be5550c71c14de98de4767e5427e1a8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/f296d6c818354c538495a152e1c84838, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d8f2fee9bba349d2ba4b10fa280d1d8d] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.7 K 2024-12-12T05:39:10,011 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:10,011 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:10,011 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:10,011 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1ca21348768a4608ad2f8f7be53b1184, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/db37227e1f2045709b6f005c1cfc6947, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/0faaa088982745d08045f2787ed5efb1] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.7 K 2024-12-12T05:39:10,012 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8be5550c71c14de98de4767e5427e1a8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733981946717 2024-12-12T05:39:10,012 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ca21348768a4608ad2f8f7be53b1184, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733981946717 2024-12-12T05:39:10,012 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f296d6c818354c538495a152e1c84838, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733981946846 2024-12-12T05:39:10,012 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting db37227e1f2045709b6f005c1cfc6947, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733981946846 2024-12-12T05:39:10,013 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] 
compactions.Compactor(224): Compacting d8f2fee9bba349d2ba4b10fa280d1d8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733981947979 2024-12-12T05:39:10,013 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0faaa088982745d08045f2787ed5efb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733981947979 2024-12-12T05:39:10,022 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:10,022 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:10,022 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/48d601b59b0e4b589db18ef5fa704fa1 is 50, key is test_row_0/B:col10/1733981947982/Put/seqid=0 2024-12-12T05:39:10,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/836dc2e21ef04c5f8f98b100edb738d9 is 50, key is test_row_0/A:col10/1733981947982/Put/seqid=0 2024-12-12T05:39:10,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741908_1084 (size=13085) 2024-12-12T05:39:10,036 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/836dc2e21ef04c5f8f98b100edb738d9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/836dc2e21ef04c5f8f98b100edb738d9 2024-12-12T05:39:10,043 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 836dc2e21ef04c5f8f98b100edb738d9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
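After the flush and these compactions settle, a reader should see the same col10 value for a given test row in families A, B and C; that per-row agreement across the three families is roughly what this test's readers assert. A minimal read-side sketch; the class name and the idea of checking a single row with one Get are illustrative, with the row, qualifier and family names taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowConsistencyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Fetch one row and compare the col10 cell across the three column families.
          Result row = table.get(new Get(Bytes.toBytes("test_row_0")));
          byte[] a = row.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
          byte[] b = row.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
          byte[] c = row.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
          boolean consistent = Bytes.equals(a, b) && Bytes.equals(b, c);
          System.out.println("row consistent across A/B/C: " + consistent);
        }
      }
    }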
2024-12-12T05:39:10,043 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,043 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981950008; duration=0sec 2024-12-12T05:39:10,043 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:10,043 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:10,043 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:10,044 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:10,046 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:10,046 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:10,046 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/4fafd35ce189427fae5094904229ead4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/591991c048ba4c2798b4536b2ad9f8bd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d47d7fbc880941dea50e1acd813c2e29] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.7 K 2024-12-12T05:39:10,047 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fafd35ce189427fae5094904229ead4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733981946717 2024-12-12T05:39:10,047 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 591991c048ba4c2798b4536b2ad9f8bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733981946846 2024-12-12T05:39:10,048 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d47d7fbc880941dea50e1acd813c2e29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733981947979 2024-12-12T05:39:10,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43689 is added to blk_1073741909_1085 (size=13085) 2024-12-12T05:39:10,056 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#71 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:10,057 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/16066e0d9b254ed083c982839489b69b is 50, key is test_row_0/C:col10/1733981947982/Put/seqid=0 2024-12-12T05:39:10,062 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/48d601b59b0e4b589db18ef5fa704fa1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/48d601b59b0e4b589db18ef5fa704fa1 2024-12-12T05:39:10,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741910_1086 (size=13085) 2024-12-12T05:39:10,071 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 48d601b59b0e4b589db18ef5fa704fa1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:10,071 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,071 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981950008; duration=0sec 2024-12-12T05:39:10,071 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,071 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:10,073 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/16066e0d9b254ed083c982839489b69b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/16066e0d9b254ed083c982839489b69b 2024-12-12T05:39:10,080 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 16066e0d9b254ed083c982839489b69b(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:10,080 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,080 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981950008; duration=0sec 2024-12-12T05:39:10,080 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,080 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:10,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T05:39:10,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:10,118 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T05:39:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d44826d337bc45a88c3def29b671684e is 50, key is test_row_0/A:col10/1733981949114/Put/seqid=0 
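The CompactingMemStore and CompactionPipeline entries just above ("FLUSHING TO DISK ... store=A", "Swapping pipeline suffix") show that these stores use an in-memory compacting memstore rather than the default implementation; flushing swaps the pipeline's segments out and writes them as one snapshot per store. A table with that shape could be declared roughly as below; the BASIC policy and the class name are assumptions rather than values read from this test, while the table and family names come from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactingMemStoreTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String family : new String[] { "A", "B", "C" }) {
            // In-memory compaction is what puts a CompactingMemStore behind each store.
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
          }
          admin.createTable(table.build());
        }
      }
    }

Which policy this run actually uses is not visible in the log; the presence of CompactingMemStore only tells us it is not the NONE setting.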
2024-12-12T05:39:10,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741911_1087 (size=12301) 2024-12-12T05:39:10,129 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d44826d337bc45a88c3def29b671684e 2024-12-12T05:39:10,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b8ab93bce230430982cb54d673afc043 is 50, key is test_row_0/B:col10/1733981949114/Put/seqid=0 2024-12-12T05:39:10,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741912_1088 (size=12301) 2024-12-12T05:39:10,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T05:39:10,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:10,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:10,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982010237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982010237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982010238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982010240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982010240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982010341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982010343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982010346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982010346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982010346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,546 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b8ab93bce230430982cb54d673afc043 2024-12-12T05:39:10,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982010546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982010548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982010549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982010551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982010551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/b2387046ad924b3ea10d5de5788f6fc1 is 50, key is test_row_0/C:col10/1733981949114/Put/seqid=0 2024-12-12T05:39:10,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741913_1089 (size=12301) 2024-12-12T05:39:10,563 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/b2387046ad924b3ea10d5de5788f6fc1 2024-12-12T05:39:10,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/d44826d337bc45a88c3def29b671684e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d44826d337bc45a88c3def29b671684e 2024-12-12T05:39:10,575 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d44826d337bc45a88c3def29b671684e, entries=150, sequenceid=357, filesize=12.0 K 2024-12-12T05:39:10,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b8ab93bce230430982cb54d673afc043 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b8ab93bce230430982cb54d673afc043 2024-12-12T05:39:10,583 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b8ab93bce230430982cb54d673afc043, entries=150, sequenceid=357, filesize=12.0 K 2024-12-12T05:39:10,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/b2387046ad924b3ea10d5de5788f6fc1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/b2387046ad924b3ea10d5de5788f6fc1 2024-12-12T05:39:10,592 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/b2387046ad924b3ea10d5de5788f6fc1, entries=150, sequenceid=357, filesize=12.0 K 2024-12-12T05:39:10,593 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 61279763b720b7a9988338e6150d61c7 in 476ms, sequenceid=357, compaction requested=false 2024-12-12T05:39:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
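The repeated "Over memstore limit=512.0 K" RegionTooBusyException warnings in the surrounding entries show the server rejecting mutations while the region's memstore is above its blocking limit and the flush above is still draining it. The stock HBase client already retries these internally according to its retry settings; purely as an illustrative sketch (the helper name and backoff values are not from the log), a caller could handle the exception explicitly like this:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class WriteRetrySketch {
    // Hypothetical helper: retry a put a few times when the region reports it is over
    // its memstore limit, sleeping between attempts so an in-flight flush (like the one
    // completing above) can drain the memstore.
    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e;  // give up and surface the exception to the caller
                }
                Thread.sleep(sleepMs);
                sleepMs = Math.min(sleepMs * 2, 5000);  // exponential backoff, capped at 5s
            }
        }
    }
}
```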
2024-12-12T05:39:10,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-12T05:39:10,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-12T05:39:10,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-12T05:39:10,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5470 sec 2024-12-12T05:39:10,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.5520 sec 2024-12-12T05:39:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:10,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T05:39:10,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:10,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:10,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:10,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/17cd060f43fc499f9b9ec6e2eb87e851 is 50, key is test_row_0/A:col10/1733981950238/Put/seqid=0 2024-12-12T05:39:10,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982010861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982010862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982010864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982010862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982010864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741914_1090 (size=14741) 2024-12-12T05:39:10,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/17cd060f43fc499f9b9ec6e2eb87e851 2024-12-12T05:39:10,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/bb52a0ac865b497485c67f70e4e23e21 is 50, key is test_row_0/B:col10/1733981950238/Put/seqid=0 2024-12-12T05:39:10,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741915_1091 (size=12301) 2024-12-12T05:39:10,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/bb52a0ac865b497485c67f70e4e23e21 2024-12-12T05:39:10,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/70dabc3c4e7545bba79e8bcccb0a1843 is 50, key is test_row_0/C:col10/1733981950238/Put/seqid=0 2024-12-12T05:39:10,899 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741916_1092 (size=12301) 2024-12-12T05:39:10,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/70dabc3c4e7545bba79e8bcccb0a1843 2024-12-12T05:39:10,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/17cd060f43fc499f9b9ec6e2eb87e851 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/17cd060f43fc499f9b9ec6e2eb87e851 2024-12-12T05:39:10,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/17cd060f43fc499f9b9ec6e2eb87e851, entries=200, sequenceid=377, filesize=14.4 K 2024-12-12T05:39:10,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/bb52a0ac865b497485c67f70e4e23e21 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/bb52a0ac865b497485c67f70e4e23e21 2024-12-12T05:39:10,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/bb52a0ac865b497485c67f70e4e23e21, entries=150, sequenceid=377, filesize=12.0 K 2024-12-12T05:39:10,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/70dabc3c4e7545bba79e8bcccb0a1843 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/70dabc3c4e7545bba79e8bcccb0a1843 2024-12-12T05:39:10,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/70dabc3c4e7545bba79e8bcccb0a1843, entries=150, sequenceid=377, filesize=12.0 K 2024-12-12T05:39:10,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 61279763b720b7a9988338e6150d61c7 in 77ms, sequenceid=377, compaction requested=true 2024-12-12T05:39:10,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:10,928 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,928 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:10,928 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:10,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:10,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:10,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:10,930 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:10,930 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:10,930 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:10,930 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:10,930 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:10,930 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
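The selection entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", followed by ExploringCompactionPolicy picking all 3 files) are driven by the store's compaction configuration. As a sketch only (the keys below are standard HBase settings and the values shown match the defaults implied by the numbers in this log; a real deployment would normally set them in hbase-site.xml rather than in code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered;
        // with 3, the three freshly flushed files above become eligible immediately.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files one minor compaction may merge.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which new writes are blocked until compaction catches up;
        // this is the "16 blocking" figure printed by SortedCompactionPolicy above.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```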
2024-12-12T05:39:10,930 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/836dc2e21ef04c5f8f98b100edb738d9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d44826d337bc45a88c3def29b671684e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/17cd060f43fc499f9b9ec6e2eb87e851] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=39.2 K 2024-12-12T05:39:10,930 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/48d601b59b0e4b589db18ef5fa704fa1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b8ab93bce230430982cb54d673afc043, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/bb52a0ac865b497485c67f70e4e23e21] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.8 K 2024-12-12T05:39:10,930 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 48d601b59b0e4b589db18ef5fa704fa1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733981947979 2024-12-12T05:39:10,931 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 836dc2e21ef04c5f8f98b100edb738d9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733981947979 2024-12-12T05:39:10,931 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b8ab93bce230430982cb54d673afc043, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733981949109 2024-12-12T05:39:10,931 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d44826d337bc45a88c3def29b671684e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733981949109 2024-12-12T05:39:10,931 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting bb52a0ac865b497485c67f70e4e23e21, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733981950238 2024-12-12T05:39:10,932 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17cd060f43fc499f9b9ec6e2eb87e851, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733981950237 2024-12-12T05:39:10,942 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:10,942 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:10,943 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/23391ef93b004fcca13c2201df2c80ba is 50, key is test_row_0/B:col10/1733981950238/Put/seqid=0 2024-12-12T05:39:10,943 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/2a05296a4d564c34bb2f7c56d1e20e4c is 50, key is test_row_0/A:col10/1733981950238/Put/seqid=0 2024-12-12T05:39:10,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741917_1093 (size=13187) 2024-12-12T05:39:10,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741918_1094 (size=13187) 2024-12-12T05:39:10,964 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/23391ef93b004fcca13c2201df2c80ba as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/23391ef93b004fcca13c2201df2c80ba 2024-12-12T05:39:10,969 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/2a05296a4d564c34bb2f7c56d1e20e4c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2a05296a4d564c34bb2f7c56d1e20e4c 2024-12-12T05:39:10,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T05:39:10,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:10,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-12T05:39:10,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:10,973 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 23391ef93b004fcca13c2201df2c80ba(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:10,973 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,973 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981950928; duration=0sec 2024-12-12T05:39:10,973 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:10,973 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:10,974 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:10,975 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:10,975 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:10,975 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:10,976 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/16066e0d9b254ed083c982839489b69b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/b2387046ad924b3ea10d5de5788f6fc1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/70dabc3c4e7545bba79e8bcccb0a1843] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.8 K 2024-12-12T05:39:10,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/82ff347611a34f22883da979b32abf20 is 50, key is test_row_0/A:col10/1733981950970/Put/seqid=0 2024-12-12T05:39:10,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 16066e0d9b254ed083c982839489b69b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733981947979 2024-12-12T05:39:10,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b2387046ad924b3ea10d5de5788f6fc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733981949109 2024-12-12T05:39:10,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 70dabc3c4e7545bba79e8bcccb0a1843, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733981950238 2024-12-12T05:39:10,988 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 2a05296a4d564c34bb2f7c56d1e20e4c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:10,989 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:10,989 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981950928; duration=0sec 2024-12-12T05:39:10,989 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:10,989 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:10,994 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:10,994 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/91a89862d18147c2a3d6629065d7f6a2 is 50, key is test_row_0/C:col10/1733981950238/Put/seqid=0 2024-12-12T05:39:10,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:10,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982010994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:10,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982010997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982010997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982010998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982010998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741919_1095 (size=12301) 2024-12-12T05:39:11,006 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/82ff347611a34f22883da979b32abf20 2024-12-12T05:39:11,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/4b699416851343b881e4a62dd0c38e02 is 50, key is test_row_0/B:col10/1733981950970/Put/seqid=0 2024-12-12T05:39:11,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741920_1096 (size=13187) 2024-12-12T05:39:11,026 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/91a89862d18147c2a3d6629065d7f6a2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/91a89862d18147c2a3d6629065d7f6a2 2024-12-12T05:39:11,034 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 91a89862d18147c2a3d6629065d7f6a2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:11,034 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:11,034 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981950929; duration=0sec 2024-12-12T05:39:11,034 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:11,034 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:11,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741921_1097 (size=12301) 2024-12-12T05:39:11,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/4b699416851343b881e4a62dd0c38e02 2024-12-12T05:39:11,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/58c1f7e4dd3d4a48ad79ee8be83ad217 is 50, key is test_row_0/C:col10/1733981950970/Put/seqid=0 2024-12-12T05:39:11,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741922_1098 (size=12301) 2024-12-12T05:39:11,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/58c1f7e4dd3d4a48ad79ee8be83ad217 2024-12-12T05:39:11,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/82ff347611a34f22883da979b32abf20 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/82ff347611a34f22883da979b32abf20 2024-12-12T05:39:11,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/82ff347611a34f22883da979b32abf20, entries=150, sequenceid=396, filesize=12.0 K 2024-12-12T05:39:11,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/4b699416851343b881e4a62dd0c38e02 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/4b699416851343b881e4a62dd0c38e02 2024-12-12T05:39:11,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/4b699416851343b881e4a62dd0c38e02, entries=150, sequenceid=396, filesize=12.0 K 2024-12-12T05:39:11,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/58c1f7e4dd3d4a48ad79ee8be83ad217 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/58c1f7e4dd3d4a48ad79ee8be83ad217 2024-12-12T05:39:11,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/58c1f7e4dd3d4a48ad79ee8be83ad217, entries=150, sequenceid=396, filesize=12.0 K 2024-12-12T05:39:11,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 61279763b720b7a9988338e6150d61c7 in 130ms, sequenceid=396, compaction requested=false 2024-12-12T05:39:11,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:11,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T05:39:11,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:11,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:11,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:11,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:11,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:11,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:11,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/c8352ecb2feb459da9c4567cab700eec is 50, key is test_row_0/A:col10/1733981951104/Put/seqid=0 2024-12-12T05:39:11,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982011118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982011119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982011122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982011122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982011123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741923_1099 (size=12301) 2024-12-12T05:39:11,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/c8352ecb2feb459da9c4567cab700eec 2024-12-12T05:39:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T05:39:11,150 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-12T05:39:11,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-12T05:39:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T05:39:11,155 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:11,156 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:11,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:11,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d2d62fe045f54039a7c0953b165d25fe is 50, key is test_row_0/B:col10/1733981951104/Put/seqid=0 2024-12-12T05:39:11,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741924_1100 (size=12301) 2024-12-12T05:39:11,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d2d62fe045f54039a7c0953b165d25fe 2024-12-12T05:39:11,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/de887883041c4cbbb67180ededd7744a is 50, key is test_row_0/C:col10/1733981951104/Put/seqid=0 2024-12-12T05:39:11,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741925_1101 (size=12301) 2024-12-12T05:39:11,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/de887883041c4cbbb67180ededd7744a 2024-12-12T05:39:11,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/c8352ecb2feb459da9c4567cab700eec as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/c8352ecb2feb459da9c4567cab700eec 2024-12-12T05:39:11,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982011223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982011224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982011226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982011227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982011227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/c8352ecb2feb459da9c4567cab700eec, entries=150, sequenceid=420, filesize=12.0 K 2024-12-12T05:39:11,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d2d62fe045f54039a7c0953b165d25fe as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d2d62fe045f54039a7c0953b165d25fe 2024-12-12T05:39:11,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d2d62fe045f54039a7c0953b165d25fe, entries=150, sequenceid=420, filesize=12.0 K 2024-12-12T05:39:11,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/de887883041c4cbbb67180ededd7744a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/de887883041c4cbbb67180ededd7744a 2024-12-12T05:39:11,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/de887883041c4cbbb67180ededd7744a, entries=150, sequenceid=420, filesize=12.0 K 2024-12-12T05:39:11,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 61279763b720b7a9988338e6150d61c7 in 142ms, sequenceid=420, compaction requested=true 2024-12-12T05:39:11,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:11,246 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:11,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:11,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:11,247 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:11,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:11,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:11,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:11,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:11,248 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:11,248 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:11,248 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:11,248 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2a05296a4d564c34bb2f7c56d1e20e4c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/82ff347611a34f22883da979b32abf20, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/c8352ecb2feb459da9c4567cab700eec] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.9 K 2024-12-12T05:39:11,249 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:11,249 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:11,249 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a05296a4d564c34bb2f7c56d1e20e4c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733981950238 2024-12-12T05:39:11,249 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:11,249 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/23391ef93b004fcca13c2201df2c80ba, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/4b699416851343b881e4a62dd0c38e02, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d2d62fe045f54039a7c0953b165d25fe] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.9 K 2024-12-12T05:39:11,249 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 23391ef93b004fcca13c2201df2c80ba, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733981950238 2024-12-12T05:39:11,249 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82ff347611a34f22883da979b32abf20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733981950861 2024-12-12T05:39:11,250 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b699416851343b881e4a62dd0c38e02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733981950861 2024-12-12T05:39:11,250 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting d2d62fe045f54039a7c0953b165d25fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733981950996 2024-12-12T05:39:11,251 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8352ecb2feb459da9c4567cab700eec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733981950996 2024-12-12T05:39:11,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T05:39:11,263 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:11,264 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T05:39:11,264 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/2878742d9f214cd9938c4bc3906b991f is 50, key is test_row_0/A:col10/1733981951104/Put/seqid=0
2024-12-12T05:39:11,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/ee5cc8d68fae44d89aee610dd6fc3b26 is 50, key is test_row_0/B:col10/1733981951104/Put/seqid=0
2024-12-12T05:39:11,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741926_1102 (size=13289)
2024-12-12T05:39:11,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741927_1103 (size=13289)
2024-12-12T05:39:11,280 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/2878742d9f214cd9938c4bc3906b991f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2878742d9f214cd9938c4bc3906b991f
2024-12-12T05:39:11,287 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 2878742d9f214cd9938c4bc3906b991f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T05:39:11,287 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7:
2024-12-12T05:39:11,287 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981951246; duration=0sec
2024-12-12T05:39:11,287 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T05:39:11,287 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A
2024-12-12T05:39:11,287 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T05:39:11,289 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T05:39:11,289 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files)
2024-12-12T05:39:11,289 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.
2024-12-12T05:39:11,289 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/91a89862d18147c2a3d6629065d7f6a2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/58c1f7e4dd3d4a48ad79ee8be83ad217, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/de887883041c4cbbb67180ededd7744a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=36.9 K
2024-12-12T05:39:11,290 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91a89862d18147c2a3d6629065d7f6a2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733981950238
2024-12-12T05:39:11,290 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58c1f7e4dd3d4a48ad79ee8be83ad217, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1733981950861
2024-12-12T05:39:11,290 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting de887883041c4cbbb67180ededd7744a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733981950996
2024-12-12T05:39:11,297 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}]
throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:11,297 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/f3d26b9bb1b3404aacbbaa951b43119b is 50, key is test_row_0/C:col10/1733981951104/Put/seqid=0 2024-12-12T05:39:11,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741928_1104 (size=13289) 2024-12-12T05:39:11,310 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-12T05:39:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:11,311 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T05:39:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:11,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:11,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:11,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:11,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/81c8efefa88d4102a1282e4e7017345e is 50, key is test_row_0/A:col10/1733981951121/Put/seqid=0 2024-12-12T05:39:11,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741929_1105 
(size=12301) 2024-12-12T05:39:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:11,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:11,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982011443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982011444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982011444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982011445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982011446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T05:39:11,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982011548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982011548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982011548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982011548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982011548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,675 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/ee5cc8d68fae44d89aee610dd6fc3b26 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ee5cc8d68fae44d89aee610dd6fc3b26 2024-12-12T05:39:11,682 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into ee5cc8d68fae44d89aee610dd6fc3b26(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:11,682 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:11,682 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981951247; duration=0sec 2024-12-12T05:39:11,682 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:11,682 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:11,712 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/f3d26b9bb1b3404aacbbaa951b43119b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f3d26b9bb1b3404aacbbaa951b43119b 2024-12-12T05:39:11,720 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into f3d26b9bb1b3404aacbbaa951b43119b(size=13.0 K), total size for store is 13.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:11,720 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:11,720 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981951247; duration=0sec 2024-12-12T05:39:11,720 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:11,720 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:11,721 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/81c8efefa88d4102a1282e4e7017345e 2024-12-12T05:39:11,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/0bb6a37f22844994944c88ec2b371a9e is 50, key is test_row_0/B:col10/1733981951121/Put/seqid=0 2024-12-12T05:39:11,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982011751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741930_1106 (size=12301) 2024-12-12T05:39:11,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982011751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982011753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,754 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/0bb6a37f22844994944c88ec2b371a9e 2024-12-12T05:39:11,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982011753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982011753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T05:39:11,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/67c389444a95425583abc9beb5935b7e is 50, key is test_row_0/C:col10/1733981951121/Put/seqid=0 2024-12-12T05:39:11,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741931_1107 (size=12301) 2024-12-12T05:39:12,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982012055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982012056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982012056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982012056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982012056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,166 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/67c389444a95425583abc9beb5935b7e 2024-12-12T05:39:12,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/81c8efefa88d4102a1282e4e7017345e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/81c8efefa88d4102a1282e4e7017345e 2024-12-12T05:39:12,187 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/81c8efefa88d4102a1282e4e7017345e, entries=150, sequenceid=437, filesize=12.0 K 2024-12-12T05:39:12,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/0bb6a37f22844994944c88ec2b371a9e as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/0bb6a37f22844994944c88ec2b371a9e 2024-12-12T05:39:12,198 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/0bb6a37f22844994944c88ec2b371a9e, entries=150, sequenceid=437, filesize=12.0 K 2024-12-12T05:39:12,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/67c389444a95425583abc9beb5935b7e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/67c389444a95425583abc9beb5935b7e 2024-12-12T05:39:12,204 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/67c389444a95425583abc9beb5935b7e, entries=150, sequenceid=437, filesize=12.0 K 2024-12-12T05:39:12,205 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 61279763b720b7a9988338e6150d61c7 in 894ms, sequenceid=437, compaction requested=false 2024-12-12T05:39:12,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:12,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:12,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-12T05:39:12,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-12T05:39:12,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-12T05:39:12,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0510 sec 2024-12-12T05:39:12,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.0580 sec 2024-12-12T05:39:12,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T05:39:12,257 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-12T05:39:12,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:12,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-12T05:39:12,259 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:12,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T05:39:12,260 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:12,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:12,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T05:39:12,412 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-12T05:39:12,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:12,412 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T05:39:12,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:12,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:12,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:12,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:12,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:12,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:12,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/97bd687b8e0a41439052c191af35b63b is 50, key is test_row_0/A:col10/1733981951444/Put/seqid=0 2024-12-12T05:39:12,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741932_1108 (size=12301) 2024-12-12T05:39:12,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:12,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:12,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T05:39:12,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982012565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982012567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982012567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982012568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982012568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982012669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982012669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982012673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982012674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982012674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,821 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/97bd687b8e0a41439052c191af35b63b 2024-12-12T05:39:12,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/18aec951166e4684b6c82b6deb80459e is 50, key is test_row_0/B:col10/1733981951444/Put/seqid=0 2024-12-12T05:39:12,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741933_1109 (size=12301) 2024-12-12T05:39:12,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T05:39:12,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982012871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982012872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982012875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982012879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:12,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982012879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982013176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982013176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982013178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982013182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982013182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,233 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/18aec951166e4684b6c82b6deb80459e 2024-12-12T05:39:13,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/57b9383f3f094d939de1b4674f842b1e is 50, key is test_row_0/C:col10/1733981951444/Put/seqid=0 2024-12-12T05:39:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741934_1110 (size=12301) 2024-12-12T05:39:13,247 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/57b9383f3f094d939de1b4674f842b1e 2024-12-12T05:39:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/97bd687b8e0a41439052c191af35b63b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/97bd687b8e0a41439052c191af35b63b 2024-12-12T05:39:13,256 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/97bd687b8e0a41439052c191af35b63b, entries=150, sequenceid=460, filesize=12.0 K 2024-12-12T05:39:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/18aec951166e4684b6c82b6deb80459e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/18aec951166e4684b6c82b6deb80459e 2024-12-12T05:39:13,262 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/18aec951166e4684b6c82b6deb80459e, entries=150, sequenceid=460, filesize=12.0 K 2024-12-12T05:39:13,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/57b9383f3f094d939de1b4674f842b1e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57b9383f3f094d939de1b4674f842b1e 2024-12-12T05:39:13,271 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57b9383f3f094d939de1b4674f842b1e, entries=150, sequenceid=460, filesize=12.0 K 2024-12-12T05:39:13,272 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=93.93 KB/96180 for 61279763b720b7a9988338e6150d61c7 in 859ms, sequenceid=460, compaction requested=true 2024-12-12T05:39:13,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:13,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:13,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-12T05:39:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-12T05:39:13,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-12T05:39:13,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0130 sec 2024-12-12T05:39:13,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.0170 sec 2024-12-12T05:39:13,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T05:39:13,363 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-12T05:39:13,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:13,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-12T05:39:13,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T05:39:13,365 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:13,366 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:13,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:13,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T05:39:13,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-12T05:39:13,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:13,518 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T05:39:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:13,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/dd28a07ab22443bba2542e69b9f56789 is 50, key is test_row_0/A:col10/1733981952566/Put/seqid=0 2024-12-12T05:39:13,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741935_1111 (size=12301) 2024-12-12T05:39:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T05:39:13,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:13,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982013703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982013704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982013704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982013707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982013707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982013808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982013808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982013809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982013811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982013811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:13,926 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/dd28a07ab22443bba2542e69b9f56789 2024-12-12T05:39:13,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/139e317bc80740609f2cb49a7d369881 is 50, key is test_row_0/B:col10/1733981952566/Put/seqid=0 2024-12-12T05:39:13,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741936_1112 (size=12301) 2024-12-12T05:39:13,938 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/139e317bc80740609f2cb49a7d369881 2024-12-12T05:39:13,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/97a5c68813984d68b85bc64cc08832f3 is 50, key is 
test_row_0/C:col10/1733981952566/Put/seqid=0 2024-12-12T05:39:13,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741937_1113 (size=12301) 2024-12-12T05:39:13,957 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/97a5c68813984d68b85bc64cc08832f3 2024-12-12T05:39:13,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/dd28a07ab22443bba2542e69b9f56789 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/dd28a07ab22443bba2542e69b9f56789 2024-12-12T05:39:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T05:39:13,972 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/dd28a07ab22443bba2542e69b9f56789, entries=150, sequenceid=477, filesize=12.0 K 2024-12-12T05:39:13,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/139e317bc80740609f2cb49a7d369881 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/139e317bc80740609f2cb49a7d369881 2024-12-12T05:39:13,978 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/139e317bc80740609f2cb49a7d369881, entries=150, sequenceid=477, filesize=12.0 K 2024-12-12T05:39:13,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/97a5c68813984d68b85bc64cc08832f3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/97a5c68813984d68b85bc64cc08832f3 2024-12-12T05:39:13,987 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/97a5c68813984d68b85bc64cc08832f3, entries=150, sequenceid=477, filesize=12.0 K 2024-12-12T05:39:13,988 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 61279763b720b7a9988338e6150d61c7 in 469ms, sequenceid=477, compaction requested=true 2024-12-12T05:39:13,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:13,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:13,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-12T05:39:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-12T05:39:13,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-12T05:39:13,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 623 msec 2024-12-12T05:39:13,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 626 msec 2024-12-12T05:39:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:14,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T05:39:14,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:14,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/9cd4c849efc9411a87859243005ac7c7 is 50, key is test_row_0/A:col10/1733981953703/Put/seqid=0 2024-12-12T05:39:14,023 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741938_1114 (size=17181) 2024-12-12T05:39:14,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982014025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982014025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982014026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982014026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982014027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982014128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982014129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982014129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982014131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982014131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982014330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982014331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982014331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982014333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982014334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/9cd4c849efc9411a87859243005ac7c7 2024-12-12T05:39:14,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8bd5fee487634579b5a4ecbfdd9337a0 is 50, key is test_row_0/B:col10/1733981953703/Put/seqid=0 2024-12-12T05:39:14,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741939_1115 (size=12301) 2024-12-12T05:39:14,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T05:39:14,468 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-12T05:39:14,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:14,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-12-12T05:39:14,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T05:39:14,471 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:14,471 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:14,471 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
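The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects writes once a region's memstore exceeds its blocking size, i.e. the per-region flush threshold (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier; writes are accepted again once a flush drains the memstore, and client-side retries are consistent with the same connections reappearing here with increasing callIds. A minimal configuration sketch under that assumption; the 128 K and 4x values are chosen only so their product matches the 512 K limit reported in this log and are not taken from the test's actual settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    // Illustrative only: a configuration whose blocking memstore limit works
    // out to 512 K (128 K flush size * block multiplier 4), matching the limit
    // reported in the RegionTooBusyException messages above.
    public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}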
2024-12-12T05:39:14,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T05:39:14,622 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-12T05:39:14,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:14,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:14,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:14,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982014635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982014636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982014636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982014637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982014637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T05:39:14,775 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-12T05:39:14,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:14,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:14,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:14,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8bd5fee487634579b5a4ecbfdd9337a0 2024-12-12T05:39:14,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/8b23c3ee48ed44b18b40ded4dcf004e1 is 50, key is test_row_0/C:col10/1733981953703/Put/seqid=0 2024-12-12T05:39:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741940_1116 (size=12301) 2024-12-12T05:39:14,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:14,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-12T05:39:14,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:14,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
as already flushing 2024-12-12T05:39:14,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:14,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:14,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T05:39:15,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-12T05:39:15,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:15,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982015138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982015140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982015142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:15,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982015143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982015144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,233 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-12T05:39:15,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:15,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:15,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/8b23c3ee48ed44b18b40ded4dcf004e1 2024-12-12T05:39:15,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/9cd4c849efc9411a87859243005ac7c7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/9cd4c849efc9411a87859243005ac7c7 2024-12-12T05:39:15,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/9cd4c849efc9411a87859243005ac7c7, entries=250, sequenceid=499, filesize=16.8 K 2024-12-12T05:39:15,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/8bd5fee487634579b5a4ecbfdd9337a0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8bd5fee487634579b5a4ecbfdd9337a0 2024-12-12T05:39:15,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8bd5fee487634579b5a4ecbfdd9337a0, entries=150, 
sequenceid=499, filesize=12.0 K 2024-12-12T05:39:15,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/8b23c3ee48ed44b18b40ded4dcf004e1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/8b23c3ee48ed44b18b40ded4dcf004e1 2024-12-12T05:39:15,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/8b23c3ee48ed44b18b40ded4dcf004e1, entries=150, sequenceid=499, filesize=12.0 K 2024-12-12T05:39:15,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 61279763b720b7a9988338e6150d61c7 in 1263ms, sequenceid=499, compaction requested=true 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:15,277 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T05:39:15,277 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:15,279 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 67373 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T05:39:15,279 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62493 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T05:39:15,279 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] 
regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:15,279 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:15,279 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,279 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,279 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ee5cc8d68fae44d89aee610dd6fc3b26, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/0bb6a37f22844994944c88ec2b371a9e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/18aec951166e4684b6c82b6deb80459e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/139e317bc80740609f2cb49a7d369881, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8bd5fee487634579b5a4ecbfdd9337a0] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=61.0 K 2024-12-12T05:39:15,279 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2878742d9f214cd9938c4bc3906b991f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/81c8efefa88d4102a1282e4e7017345e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/97bd687b8e0a41439052c191af35b63b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/dd28a07ab22443bba2542e69b9f56789, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/9cd4c849efc9411a87859243005ac7c7] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=65.8 K 2024-12-12T05:39:15,279 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ee5cc8d68fae44d89aee610dd6fc3b26, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733981950996 2024-12-12T05:39:15,279 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 
{}] compactions.Compactor(224): Compacting 2878742d9f214cd9938c4bc3906b991f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733981950996 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81c8efefa88d4102a1282e4e7017345e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733981951120 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bb6a37f22844994944c88ec2b371a9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733981951120 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97bd687b8e0a41439052c191af35b63b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733981951442 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 18aec951166e4684b6c82b6deb80459e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733981951442 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd28a07ab22443bba2542e69b9f56789, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733981952566 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 139e317bc80740609f2cb49a7d369881, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733981952566 2024-12-12T05:39:15,280 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cd4c849efc9411a87859243005ac7c7, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733981953702 2024-12-12T05:39:15,281 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bd5fee487634579b5a4ecbfdd9337a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733981953703 2024-12-12T05:39:15,290 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:15,290 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/ff5e8ec64c45494aaf72fd2027061117 is 50, key is test_row_0/A:col10/1733981953703/Put/seqid=0 2024-12-12T05:39:15,293 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:15,293 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/2786216dae304feb87343d0e788f7ad4 is 50, key is test_row_0/B:col10/1733981953703/Put/seqid=0 2024-12-12T05:39:15,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741941_1117 (size=13459) 2024-12-12T05:39:15,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741942_1118 (size=13459) 2024-12-12T05:39:15,309 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/ff5e8ec64c45494aaf72fd2027061117 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ff5e8ec64c45494aaf72fd2027061117 2024-12-12T05:39:15,315 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into ff5e8ec64c45494aaf72fd2027061117(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:15,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:15,315 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=11, startTime=1733981955277; duration=0sec 2024-12-12T05:39:15,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:15,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:15,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T05:39:15,316 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/2786216dae304feb87343d0e788f7ad4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/2786216dae304feb87343d0e788f7ad4 2024-12-12T05:39:15,322 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62493 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T05:39:15,322 DEBUG 
[RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:15,322 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:15,323 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f3d26b9bb1b3404aacbbaa951b43119b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/67c389444a95425583abc9beb5935b7e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57b9383f3f094d939de1b4674f842b1e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/97a5c68813984d68b85bc64cc08832f3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/8b23c3ee48ed44b18b40ded4dcf004e1] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=61.0 K 2024-12-12T05:39:15,323 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3d26b9bb1b3404aacbbaa951b43119b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733981950996 2024-12-12T05:39:15,323 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67c389444a95425583abc9beb5935b7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1733981951120 2024-12-12T05:39:15,324 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57b9383f3f094d939de1b4674f842b1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733981951442 2024-12-12T05:39:15,324 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97a5c68813984d68b85bc64cc08832f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1733981952566 2024-12-12T05:39:15,324 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b23c3ee48ed44b18b40ded4dcf004e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733981953703 2024-12-12T05:39:15,327 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into 2786216dae304feb87343d0e788f7ad4(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
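The compaction entries above show each column-family store merging five HFiles of roughly 12-13 K apiece into a single ~13.1 K file: A and B have just completed, and ExploringCompactionPolicy has now made the same selection for C, with PressureAwareThroughputController throttling the writes. As a rough illustration of how this kind of compaction can be requested and observed from the client side, here is a minimal sketch against the HBase 2.x Admin API; the connection setup, the hard-coded table name, and the polling interval are assumptions for the example and are not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml from the classpath (assumed)
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name as it appears in the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                                    // ask the region servers to compact all regions of the table
          // Poll until no compaction is reported for the table any more.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);                                     // illustrative polling interval
          }
        }
      }
    }
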
2024-12-12T05:39:15,327 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:15,327 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=11, startTime=1733981955277; duration=0sec 2024-12-12T05:39:15,327 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:15,327 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:15,335 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:15,336 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3e20c64ef5c3420ea6af6a506a608192 is 50, key is test_row_0/C:col10/1733981953703/Put/seqid=0 2024-12-12T05:39:15,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741943_1119 (size=13459) 2024-12-12T05:39:15,385 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:15,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-12T05:39:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
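The pid=33 entries above and below record a master-driven flush: RSRpcServices executes the remote FlushRegionCallable, the region flushes all three column families, and the result is reported back so the parent FlushTableProcedure (pid=32) can finish. A client can trigger the same path with Admin.flush; the sketch below is illustrative only, assuming an hbase-site.xml on the classpath that points at this cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush of every region of the table; in this log that request
          // shows up as FlushTableProcedure (pid=32) with a FlushRegionProcedure
          // subprocedure (pid=33) executed on the region server.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
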
2024-12-12T05:39:15,386 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:39:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:15,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:15,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/a223a23a4e59432daaf6c6c34e98ea9a is 50, key is test_row_0/A:col10/1733981954025/Put/seqid=0 2024-12-12T05:39:15,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741944_1120 (size=12301) 2024-12-12T05:39:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T05:39:15,745 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/3e20c64ef5c3420ea6af6a506a608192 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3e20c64ef5c3420ea6af6a506a608192 2024-12-12T05:39:15,751 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 3e20c64ef5c3420ea6af6a506a608192(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
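From this point on the log fills with RegionTooBusyException warnings: writers keep issuing Puts while the region is already flushing, the memstore crosses its 512.0 K blocking limit (presumably a deliberately small hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier in the test configuration), and HRegion.checkResources rejects the mutations until the flush completes. The sketch below shows the shape of a bounded client-side retry around a single Put; the row, family, qualifier and value are illustrative, the stock client typically applies its own retry and backoff before this exception ever reaches application code, and depending on retry settings it may surface wrapped in a RetriesExhaustedException instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row/family/qualifier mirror the keys seen in this log; the value is made up.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                            // write accepted once the memstore drains
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;        // give up after a few attempts
              Thread.sleep(100L * attempt);     // simple linear backoff before retrying
            }
          }
        }
      }
    }
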
2024-12-12T05:39:15,751 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:15,751 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=11, startTime=1733981955277; duration=0sec 2024-12-12T05:39:15,751 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:15,751 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:15,795 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/a223a23a4e59432daaf6c6c34e98ea9a 2024-12-12T05:39:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b70bf562412b4d5ea5bb3b37a6e12d09 is 50, key is test_row_0/B:col10/1733981954025/Put/seqid=0 2024-12-12T05:39:15,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741945_1121 (size=12301) 2024-12-12T05:39:15,807 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b70bf562412b4d5ea5bb3b37a6e12d09 2024-12-12T05:39:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/507b3a9caef04fcdb615b7ededf90476 is 50, key is test_row_0/C:col10/1733981954025/Put/seqid=0 2024-12-12T05:39:15,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741946_1122 (size=12301) 2024-12-12T05:39:16,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:16,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:16,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982016157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982016158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982016160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982016160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982016161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,220 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/507b3a9caef04fcdb615b7ededf90476 2024-12-12T05:39:16,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/a223a23a4e59432daaf6c6c34e98ea9a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/a223a23a4e59432daaf6c6c34e98ea9a 2024-12-12T05:39:16,230 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/a223a23a4e59432daaf6c6c34e98ea9a, entries=150, sequenceid=515, filesize=12.0 K 2024-12-12T05:39:16,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/b70bf562412b4d5ea5bb3b37a6e12d09 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b70bf562412b4d5ea5bb3b37a6e12d09 2024-12-12T05:39:16,236 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b70bf562412b4d5ea5bb3b37a6e12d09, entries=150, sequenceid=515, filesize=12.0 K 2024-12-12T05:39:16,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/507b3a9caef04fcdb615b7ededf90476 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/507b3a9caef04fcdb615b7ededf90476 2024-12-12T05:39:16,242 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/507b3a9caef04fcdb615b7ededf90476, entries=150, sequenceid=515, filesize=12.0 K 2024-12-12T05:39:16,243 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 61279763b720b7a9988338e6150d61c7 in 857ms, sequenceid=515, compaction requested=false 2024-12-12T05:39:16,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:16,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:16,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-12-12T05:39:16,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-12-12T05:39:16,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-12T05:39:16,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7730 sec 2024-12-12T05:39:16,246 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.7750 sec 2024-12-12T05:39:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:16,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:39:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:16,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/ef9235e23c2b4edb84b76f35d4e112ea is 50, key is test_row_0/A:col10/1733981956262/Put/seqid=0 2024-12-12T05:39:16,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982016267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982016268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982016268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982016268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982016269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741947_1123 (size=19621) 2024-12-12T05:39:16,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982016370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982016370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982016371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982016371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982016372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982016571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982016573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T05:39:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982016573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,574 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-12T05:39:16,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:16,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982016574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982016575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-12-12T05:39:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:16,577 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:16,577 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:16,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:16,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:16,678 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=542 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/ef9235e23c2b4edb84b76f35d4e112ea 2024-12-12T05:39:16,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/1f3036e27f374a63b412be44f0e5d7f5 is 50, key is test_row_0/B:col10/1733981956262/Put/seqid=0 2024-12-12T05:39:16,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741948_1124 (size=12301) 2024-12-12T05:39:16,728 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:16,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:16,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:16,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:16,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:16,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:16,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:16,845 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:60303 2024-12-12T05:39:16,845 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:60303 2024-12-12T05:39:16,845 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:16,845 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:16,846 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:60303 2024-12-12T05:39:16,846 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:16,849 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:60303 2024-12-12T05:39:16,849 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:16,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982016875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982016875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982016876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982016877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982016878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:16,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:16,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:16,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:16,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:16,986 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T05:39:17,035 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:17,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:17,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:17,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=542 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/1f3036e27f374a63b412be44f0e5d7f5 2024-12-12T05:39:17,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/a4e3e5fff5484f33836b667eb6e3f077 is 50, key is test_row_0/C:col10/1733981956262/Put/seqid=0 2024-12-12T05:39:17,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741949_1125 (size=12301) 2024-12-12T05:39:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:17,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:17,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:17,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:17,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,345 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:17,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:17,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46360 deadline: 1733982017379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:17,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46372 deadline: 1733982017379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46376 deadline: 1733982017379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:17,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46358 deadline: 1733982017380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:17,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46384 deadline: 1733982017381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:17,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:17,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:17,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=542 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/a4e3e5fff5484f33836b667eb6e3f077 2024-12-12T05:39:17,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/ef9235e23c2b4edb84b76f35d4e112ea as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ef9235e23c2b4edb84b76f35d4e112ea 2024-12-12T05:39:17,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ef9235e23c2b4edb84b76f35d4e112ea, entries=300, sequenceid=542, filesize=19.2 K 2024-12-12T05:39:17,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/1f3036e27f374a63b412be44f0e5d7f5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/1f3036e27f374a63b412be44f0e5d7f5 2024-12-12T05:39:17,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/1f3036e27f374a63b412be44f0e5d7f5, entries=150, 
sequenceid=542, filesize=12.0 K 2024-12-12T05:39:17,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/a4e3e5fff5484f33836b667eb6e3f077 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a4e3e5fff5484f33836b667eb6e3f077 2024-12-12T05:39:17,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a4e3e5fff5484f33836b667eb6e3f077, entries=150, sequenceid=542, filesize=12.0 K 2024-12-12T05:39:17,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 61279763b720b7a9988338e6150d61c7 in 1285ms, sequenceid=542, compaction requested=true 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:17,548 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:17,548 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 61279763b720b7a9988338e6150d61c7:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:17,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:17,549 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:17,550 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/A is initiating minor compaction (all files) 2024-12-12T05:39:17,550 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:17,550 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/B is initiating minor compaction (all files) 2024-12-12T05:39:17,550 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/A in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,550 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/B in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,550 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ff5e8ec64c45494aaf72fd2027061117, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/a223a23a4e59432daaf6c6c34e98ea9a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ef9235e23c2b4edb84b76f35d4e112ea] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=44.3 K 2024-12-12T05:39:17,550 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/2786216dae304feb87343d0e788f7ad4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b70bf562412b4d5ea5bb3b37a6e12d09, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/1f3036e27f374a63b412be44f0e5d7f5] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=37.2 K 2024-12-12T05:39:17,550 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ff5e8ec64c45494aaf72fd2027061117, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733981953703 2024-12-12T05:39:17,550 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2786216dae304feb87343d0e788f7ad4, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733981953703 2024-12-12T05:39:17,551 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a223a23a4e59432daaf6c6c34e98ea9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1733981954024 2024-12-12T05:39:17,551 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting b70bf562412b4d5ea5bb3b37a6e12d09, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1733981954024 2024-12-12T05:39:17,551 DEBUG 
[RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ef9235e23c2b4edb84b76f35d4e112ea, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=542, earliestPutTs=1733981956151 2024-12-12T05:39:17,551 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f3036e27f374a63b412be44f0e5d7f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=542, earliestPutTs=1733981956262 2024-12-12T05:39:17,561 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#B#compaction#111 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:17,562 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d186234a00374b24945d60caa4911fc9 is 50, key is test_row_0/B:col10/1733981956262/Put/seqid=0 2024-12-12T05:39:17,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741950_1126 (size=13561) 2024-12-12T05:39:17,587 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#A#compaction#112 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:17,587 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/062f155651d14a7daa7eb31c540e2d10 is 50, key is test_row_0/A:col10/1733981956262/Put/seqid=0 2024-12-12T05:39:17,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741951_1127 (size=13561) 2024-12-12T05:39:17,652 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:17,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-12T05:39:17,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
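The RegionTooBusyException entries above show puts being rejected while the region's memstore is over its blocking limit (512.0 K in this run), and the flush that follows is what drains it. Below is a minimal, hypothetical client-side sketch of the same situation; the table, row, and column names are taken from the log, the retry count and backoff are illustrative, and in practice the default HBase client already retries this exception internally.

```java
// Minimal sketch: writing to the table from this run and backing off when the
// region reports it is over its memstore blocking limit. Assumes the standard
// HBase 2.x client API; retry count and backoff values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore has been flushed below the limit
        } catch (RegionTooBusyException busy) {
          // Server side: HRegion.checkResources() rejected the write because the
          // memstore is over its blocking limit; wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```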
2024-12-12T05:39:17,653 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T05:39:17,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:17,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:17,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:17,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:17,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:17,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:17,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/b199bb7ebb6f405a977ed64fb083829c is 50, key is test_row_0/A:col10/1733981956267/Put/seqid=0 2024-12-12T05:39:17,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741952_1128 (size=12301) 2024-12-12T05:39:17,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:17,980 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/d186234a00374b24945d60caa4911fc9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d186234a00374b24945d60caa4911fc9 2024-12-12T05:39:17,986 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/B of 61279763b720b7a9988338e6150d61c7 into d186234a00374b24945d60caa4911fc9(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
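The compaction work above (three eligible store files per family picked by ExploringCompactionPolicy and executed under PressureAwareThroughputController) was queued automatically after the flush; the same operations can also be requested explicitly through the Admin API. A small sketch, assuming the HBase 2.x Admin interface; both compaction calls only queue work that the region server then runs with the machinery seen in this log.

```java
// Sketch: asking the cluster to flush and then compact the test table explicitly.
// Assumes the HBase 2.x Admin API; compact()/majorCompact() are asynchronous
// requests serviced by the same flush/compaction threads visible in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);        // write memstores out as new HFiles
      admin.compact(table);      // queue a minor compaction of eligible store files
      admin.majorCompact(table); // or rewrite every store file in one pass
    }
  }
}
```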
2024-12-12T05:39:17,986 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:17,986 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/B, priority=13, startTime=1733981957548; duration=0sec 2024-12-12T05:39:17,986 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:17,986 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:B 2024-12-12T05:39:17,986 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:17,987 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:17,987 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 61279763b720b7a9988338e6150d61c7/C is initiating minor compaction (all files) 2024-12-12T05:39:17,988 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 61279763b720b7a9988338e6150d61c7/C in TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:17,988 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3e20c64ef5c3420ea6af6a506a608192, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/507b3a9caef04fcdb615b7ededf90476, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a4e3e5fff5484f33836b667eb6e3f077] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp, totalSize=37.2 K 2024-12-12T05:39:17,988 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e20c64ef5c3420ea6af6a506a608192, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733981953703 2024-12-12T05:39:17,989 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 507b3a9caef04fcdb615b7ededf90476, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1733981954024 2024-12-12T05:39:17,989 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4e3e5fff5484f33836b667eb6e3f077, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=542, earliestPutTs=1733981956262 2024-12-12T05:39:17,997 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/062f155651d14a7daa7eb31c540e2d10 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/062f155651d14a7daa7eb31c540e2d10 2024-12-12T05:39:18,000 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 61279763b720b7a9988338e6150d61c7#C#compaction#114 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:18,001 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/04642126a5814097859c700032ebd484 is 50, key is test_row_0/C:col10/1733981956262/Put/seqid=0 2024-12-12T05:39:18,003 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/A of 61279763b720b7a9988338e6150d61c7 into 062f155651d14a7daa7eb31c540e2d10(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:18,003 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:18,003 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/A, priority=13, startTime=1733981957548; duration=0sec 2024-12-12T05:39:18,003 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:18,003 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:A 2024-12-12T05:39:18,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741953_1129 (size=13561) 2024-12-12T05:39:18,068 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/b199bb7ebb6f405a977ed64fb083829c 2024-12-12T05:39:18,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e6cc36140dc34e61819978c238be9ff0 is 50, key is test_row_0/B:col10/1733981956267/Put/seqid=0 2024-12-12T05:39:18,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741954_1130 (size=12301) 
2024-12-12T05:39:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:18,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. as already flushing 2024-12-12T05:39:18,387 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:60303 2024-12-12T05:39:18,387 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:60303 2024-12-12T05:39:18,387 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:18,387 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:18,389 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:60303 2024-12-12T05:39:18,389 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:18,390 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:60303 2024-12-12T05:39:18,391 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:18,391 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:60303 2024-12-12T05:39:18,391 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:18,410 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/04642126a5814097859c700032ebd484 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/04642126a5814097859c700032ebd484 2024-12-12T05:39:18,416 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 61279763b720b7a9988338e6150d61c7/C of 61279763b720b7a9988338e6150d61c7 into 04642126a5814097859c700032ebd484(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
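The repeated "Committing ….tmp/<family>/<file> as …/<family>/<file>" lines show the two-step commit used by both flushes and compactions: output is written fully under the region's .tmp directory and only becomes a live store file once it is moved into the column-family directory. A simplified sketch of that pattern with the Hadoop FileSystem API; the paths are shortened stand-ins for the hdfs://localhost:45813/user/jenkins/test-data/… locations in this log.

```java
// Sketch of the write-to-.tmp-then-rename commit pattern visible in the
// "Committing ... as ..." lines above. Paths are illustrative stand-ins only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // The new HFile is fully written under the region's .tmp directory first...
    Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/C/newfile");
    // ...and only becomes visible to readers once renamed into the family directory.
    Path live = new Path("/data/default/TestAcidGuarantees/region/C/newfile");
    if (!fs.rename(tmp, live)) {
      throw new java.io.IOException("failed to commit " + tmp + " to " + live);
    }
  }
}
```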
2024-12-12T05:39:18,417 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:18,417 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7., storeName=61279763b720b7a9988338e6150d61c7/C, priority=13, startTime=1733981957548; duration=0sec 2024-12-12T05:39:18,417 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:18,417 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 61279763b720b7a9988338e6150d61c7:C 2024-12-12T05:39:18,481 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e6cc36140dc34e61819978c238be9ff0 2024-12-12T05:39:18,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/ac9f71599ce4475bb9f5fdcdd2d1835c is 50, key is test_row_0/C:col10/1733981956267/Put/seqid=0 2024-12-12T05:39:18,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741955_1131 (size=12301) 2024-12-12T05:39:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:18,902 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=552 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/ac9f71599ce4475bb9f5fdcdd2d1835c 2024-12-12T05:39:18,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/b199bb7ebb6f405a977ed64fb083829c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b199bb7ebb6f405a977ed64fb083829c 2024-12-12T05:39:18,916 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b199bb7ebb6f405a977ed64fb083829c, entries=150, sequenceid=552, filesize=12.0 K 2024-12-12T05:39:18,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 
{event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/e6cc36140dc34e61819978c238be9ff0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e6cc36140dc34e61819978c238be9ff0 2024-12-12T05:39:18,921 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e6cc36140dc34e61819978c238be9ff0, entries=150, sequenceid=552, filesize=12.0 K 2024-12-12T05:39:18,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/ac9f71599ce4475bb9f5fdcdd2d1835c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac9f71599ce4475bb9f5fdcdd2d1835c 2024-12-12T05:39:18,925 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac9f71599ce4475bb9f5fdcdd2d1835c, entries=150, sequenceid=552, filesize=12.0 K 2024-12-12T05:39:18,926 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=33.54 KB/34350 for 61279763b720b7a9988338e6150d61c7 in 1273ms, sequenceid=552, compaction requested=false 2024-12-12T05:39:18,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:18,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
2024-12-12T05:39:18,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-12-12T05:39:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-12-12T05:39:18,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-12T05:39:18,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3500 sec 2024-12-12T05:39:18,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 2.3530 sec 2024-12-12T05:39:20,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-12T05:39:20,684 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7824 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7866 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3456 2024-12-12T05:39:20,684 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10364 rows 2024-12-12T05:39:20,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3453 2024-12-12T05:39:20,685 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10353 rows 2024-12-12T05:39:20,685 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:39:20,685 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:60303 2024-12-12T05:39:20,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:20,689 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T05:39:20,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T05:39:20,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:20,699 DEBUG 
[PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981960699"}]},"ts":"1733981960699"} 2024-12-12T05:39:20,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T05:39:20,700 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T05:39:20,729 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T05:39:20,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:39:20,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, UNASSIGN}] 2024-12-12T05:39:20,738 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, UNASSIGN 2024-12-12T05:39:20,738 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=61279763b720b7a9988338e6150d61c7, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:20,740 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:39:20,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:39:20,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T05:39:20,898 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:20,901 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:20,901 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:39:20,902 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 61279763b720b7a9988338e6150d61c7, disabling compactions & flushes 2024-12-12T05:39:20,902 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:20,902 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 
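The disable sequence above (DisableTableProcedure → CloseTableRegionsProcedure → TransitRegionStateProcedure → CloseRegionProcedure) is what the master runs when a client issues the "disable TestAcidGuarantees" request shown a few lines earlier. A hedged sketch of the client side, assuming the HBase 2.x Admin API; the deleteTable() call is only the usual teardown step, not something this log has reached yet.

```java
// Sketch: the client calls behind the "disable TestAcidGuarantees" request above.
// Assumes the HBase 2.x Admin API; disableTable() blocks until the master's
// DisableTableProcedure (and its region-close subprocedures) has completed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);        // regions close; a final flush runs first
      if (admin.isTableDisabled(table)) {
        admin.deleteTable(table);       // typical teardown once the table is disabled
      }
    }
  }
}
```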
2024-12-12T05:39:20,902 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. after waiting 0 ms 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:20,903 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing 61279763b720b7a9988338e6150d61c7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=A 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=B 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 61279763b720b7a9988338e6150d61c7, store=C 2024-12-12T05:39:20,903 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:20,909 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/599f28291555403498f2773c6590404e is 50, key is test_row_0/A:col10/1733981958389/Put/seqid=0 2024-12-12T05:39:20,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741956_1132 (size=9857) 2024-12-12T05:39:21,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T05:39:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T05:39:21,315 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/599f28291555403498f2773c6590404e 2024-12-12T05:39:21,329 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/fbbda77c99f044b8b31d76e6ec9d2f43 is 50, key is test_row_0/B:col10/1733981958389/Put/seqid=0 2024-12-12T05:39:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741957_1133 (size=9857) 2024-12-12T05:39:21,736 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/fbbda77c99f044b8b31d76e6ec9d2f43 2024-12-12T05:39:21,752 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/164d4bf5dd7340b8a57410bd988c044b is 50, key is test_row_0/C:col10/1733981958389/Put/seqid=0 2024-12-12T05:39:21,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741958_1134 (size=9857) 2024-12-12T05:39:21,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T05:39:22,158 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/164d4bf5dd7340b8a57410bd988c044b 2024-12-12T05:39:22,168 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/A/599f28291555403498f2773c6590404e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/599f28291555403498f2773c6590404e 2024-12-12T05:39:22,176 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/599f28291555403498f2773c6590404e, entries=100, sequenceid=563, filesize=9.6 K 2024-12-12T05:39:22,177 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/B/fbbda77c99f044b8b31d76e6ec9d2f43 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/fbbda77c99f044b8b31d76e6ec9d2f43 2024-12-12T05:39:22,182 INFO 
[RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/fbbda77c99f044b8b31d76e6ec9d2f43, entries=100, sequenceid=563, filesize=9.6 K 2024-12-12T05:39:22,183 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/.tmp/C/164d4bf5dd7340b8a57410bd988c044b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/164d4bf5dd7340b8a57410bd988c044b 2024-12-12T05:39:22,187 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/164d4bf5dd7340b8a57410bd988c044b, entries=100, sequenceid=563, filesize=9.6 K 2024-12-12T05:39:22,187 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 61279763b720b7a9988338e6150d61c7 in 1284ms, sequenceid=563, compaction requested=true 2024-12-12T05:39:22,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7fd8efc5717744a89a110adc0a100cb1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7bfc4824fbf74dc48e95b5318bae7890, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cb9cb08bebde4a859ffa9820e89d1438, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1c364734fe5a455a8c013596fb07fef5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d433d93dd1dc43338ab34de56047dfbc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/330dab8bc3bf4aa2afe9bf36c2aedf61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/41b59c3b0bd3496893ab7d34c15c4433, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1e3bab22b0c04e2cafecac8a4df240a0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f4fd0e00b98848ebbd75dbf6e05436ea, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1182e0ada64d419dad86942751fdf511, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d6e0ab29e6a34096a505ac485098d66a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/45d01d7d3543467d9ee7bba23cbb2c4e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/edd1893ea65c4998a98c481888eb9d20, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cc9f2d37dc9a41a39790e2ed27ea5816, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/5b958e0815ac4dbe815a725423743846, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/40d9210cbc5543aa91dd26ad7b2f52fd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f743cda6233f43b4aa55f0839598ce6d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b6d79912ac2542a4abc9bb31be910f2a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/92fd91a4dec64ed19da24166e1161072, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1ca21348768a4608ad2f8f7be53b1184, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/db37227e1f2045709b6f005c1cfc6947, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/836dc2e21ef04c5f8f98b100edb738d9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/0faaa088982745d08045f2787ed5efb1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d44826d337bc45a88c3def29b671684e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/17cd060f43fc499f9b9ec6e2eb87e851, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2a05296a4d564c34bb2f7c56d1e20e4c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/82ff347611a34f22883da979b32abf20, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2878742d9f214cd9938c4bc3906b991f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/c8352ecb2feb459da9c4567cab700eec, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/81c8efefa88d4102a1282e4e7017345e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/97bd687b8e0a41439052c191af35b63b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/dd28a07ab22443bba2542e69b9f56789, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/9cd4c849efc9411a87859243005ac7c7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ff5e8ec64c45494aaf72fd2027061117, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/a223a23a4e59432daaf6c6c34e98ea9a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ef9235e23c2b4edb84b76f35d4e112ea] to archive 2024-12-12T05:39:22,191 DEBUG [StoreCloser-TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
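The two records above, together with the per-file records that follow, show the StoreCloser handing the compacted family-A store files to HFileArchiver, which relocates each file from the region's data/ directory to the parallel archive/ directory on the same filesystem. Below is a minimal sketch of that data/-to-archive/ path mapping using only the plain Hadoop FileSystem API; the class and method names are illustrative, and this is not HBase's actual HFileArchiver implementation (which also handles copy-plus-delete fallbacks and name collisions).

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  // Re-roots a store file path from <root>/data/... to <root>/archive/data/...,
  // e.g. .../0e1346bf.../data/default/TestAcidGuarantees/<region>/A/<hfile>
  //   -> .../0e1346bf.../archive/data/default/TestAcidGuarantees/<region>/A/<hfile>
  // Hypothetical helper for illustration only; not HBase's HFileArchiver code.
  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // path relative to the HBase root dir
    Path archived = new Path(new Path(rootDir, "archive"), relative);
    fs.mkdirs(archived.getParent());           // make sure the archive directory exists
    if (!fs.rename(storeFile, archived)) {     // a rename keeps the move cheap on HDFS
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
  }
}
```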
2024-12-12T05:39:22,198 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7fd8efc5717744a89a110adc0a100cb1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7fd8efc5717744a89a110adc0a100cb1 2024-12-12T05:39:22,198 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1e3bab22b0c04e2cafecac8a4df240a0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1e3bab22b0c04e2cafecac8a4df240a0 2024-12-12T05:39:22,198 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1c364734fe5a455a8c013596fb07fef5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1c364734fe5a455a8c013596fb07fef5 2024-12-12T05:39:22,198 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/41b59c3b0bd3496893ab7d34c15c4433 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/41b59c3b0bd3496893ab7d34c15c4433 2024-12-12T05:39:22,198 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7bfc4824fbf74dc48e95b5318bae7890 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/7bfc4824fbf74dc48e95b5318bae7890 2024-12-12T05:39:22,199 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/330dab8bc3bf4aa2afe9bf36c2aedf61 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/330dab8bc3bf4aa2afe9bf36c2aedf61 2024-12-12T05:39:22,199 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cb9cb08bebde4a859ffa9820e89d1438 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cb9cb08bebde4a859ffa9820e89d1438 2024-12-12T05:39:22,199 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d433d93dd1dc43338ab34de56047dfbc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d433d93dd1dc43338ab34de56047dfbc 2024-12-12T05:39:22,200 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1182e0ada64d419dad86942751fdf511 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1182e0ada64d419dad86942751fdf511 2024-12-12T05:39:22,201 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f4fd0e00b98848ebbd75dbf6e05436ea to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f4fd0e00b98848ebbd75dbf6e05436ea 2024-12-12T05:39:22,201 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/e1ce93b5dfe04a5d8c0dc9bb48b82adc 2024-12-12T05:39:22,201 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d6e0ab29e6a34096a505ac485098d66a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d6e0ab29e6a34096a505ac485098d66a 2024-12-12T05:39:22,201 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/45d01d7d3543467d9ee7bba23cbb2c4e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/45d01d7d3543467d9ee7bba23cbb2c4e 2024-12-12T05:39:22,201 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/edd1893ea65c4998a98c481888eb9d20 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/edd1893ea65c4998a98c481888eb9d20 2024-12-12T05:39:22,202 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cc9f2d37dc9a41a39790e2ed27ea5816 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/cc9f2d37dc9a41a39790e2ed27ea5816 2024-12-12T05:39:22,202 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/5b958e0815ac4dbe815a725423743846 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/5b958e0815ac4dbe815a725423743846 2024-12-12T05:39:22,202 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/40d9210cbc5543aa91dd26ad7b2f52fd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/40d9210cbc5543aa91dd26ad7b2f52fd 2024-12-12T05:39:22,203 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f743cda6233f43b4aa55f0839598ce6d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/f743cda6233f43b4aa55f0839598ce6d 2024-12-12T05:39:22,203 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1ca21348768a4608ad2f8f7be53b1184 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/1ca21348768a4608ad2f8f7be53b1184 2024-12-12T05:39:22,203 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b6d79912ac2542a4abc9bb31be910f2a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b6d79912ac2542a4abc9bb31be910f2a 2024-12-12T05:39:22,206 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/db37227e1f2045709b6f005c1cfc6947 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/db37227e1f2045709b6f005c1cfc6947 2024-12-12T05:39:22,206 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/92fd91a4dec64ed19da24166e1161072 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/92fd91a4dec64ed19da24166e1161072 2024-12-12T05:39:22,207 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/0faaa088982745d08045f2787ed5efb1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/0faaa088982745d08045f2787ed5efb1 2024-12-12T05:39:22,207 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/836dc2e21ef04c5f8f98b100edb738d9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/836dc2e21ef04c5f8f98b100edb738d9 2024-12-12T05:39:22,208 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2a05296a4d564c34bb2f7c56d1e20e4c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2a05296a4d564c34bb2f7c56d1e20e4c 2024-12-12T05:39:22,208 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/17cd060f43fc499f9b9ec6e2eb87e851 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/17cd060f43fc499f9b9ec6e2eb87e851 2024-12-12T05:39:22,208 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/82ff347611a34f22883da979b32abf20 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/82ff347611a34f22883da979b32abf20 2024-12-12T05:39:22,208 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d44826d337bc45a88c3def29b671684e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/d44826d337bc45a88c3def29b671684e 2024-12-12T05:39:22,208 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2878742d9f214cd9938c4bc3906b991f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/2878742d9f214cd9938c4bc3906b991f 2024-12-12T05:39:22,209 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/c8352ecb2feb459da9c4567cab700eec to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/c8352ecb2feb459da9c4567cab700eec 2024-12-12T05:39:22,209 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/81c8efefa88d4102a1282e4e7017345e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/81c8efefa88d4102a1282e4e7017345e 2024-12-12T05:39:22,209 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/97bd687b8e0a41439052c191af35b63b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/97bd687b8e0a41439052c191af35b63b 2024-12-12T05:39:22,209 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/9cd4c849efc9411a87859243005ac7c7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/9cd4c849efc9411a87859243005ac7c7 2024-12-12T05:39:22,209 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ff5e8ec64c45494aaf72fd2027061117 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ff5e8ec64c45494aaf72fd2027061117 2024-12-12T05:39:22,209 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/dd28a07ab22443bba2542e69b9f56789 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/dd28a07ab22443bba2542e69b9f56789 2024-12-12T05:39:22,210 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/a223a23a4e59432daaf6c6c34e98ea9a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/a223a23a4e59432daaf6c6c34e98ea9a 2024-12-12T05:39:22,210 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ef9235e23c2b4edb84b76f35d4e112ea to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/ef9235e23c2b4edb84b76f35d4e112ea 2024-12-12T05:39:22,221 DEBUG [StoreCloser-TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/34300d3d452445469390ea34cbfb4d6a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8fba7b57810f4d5698645b5f6862a52e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/825d981f56644557b214efb2caa1132c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/83cd2c1cf4ae4a889004783760078328, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/a27bbf8f0f6a412ba848fe808ee17ebd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/10b24a813186451391712b8c33ce72d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/647d7f752992443ea8330022a41efc10, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/c9c0c3efe03a4cdc843bcacb955108af, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/731c2d75e4e441088480b4d85d825793, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/5d85011d43ac48a69d03e33e62e0fb42, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e72aef580c2447d2953c12823cae62e5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b6f84380b81147f8be1250d923faa71f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ca6db3ab439e49fa85f7a3f8105b44a6, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04992170a7c34c679a4291aab1ff977b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8229038cd9fe4bd9bfd5ab45c1671635, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/91ecb1b6a2ff43cfb444ba917a98afe9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04f6819a17694280acf4571dfec93ed4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/3983e4d54f7443188d1f6d573f6d3b61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/45883efa055c499b901d26f1635963de, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8be5550c71c14de98de4767e5427e1a8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e2475c2aa0fc4bdfa594052bb077b6f3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/f296d6c818354c538495a152e1c84838, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/48d601b59b0e4b589db18ef5fa704fa1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d8f2fee9bba349d2ba4b10fa280d1d8d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b8ab93bce230430982cb54d673afc043, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/23391ef93b004fcca13c2201df2c80ba, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/bb52a0ac865b497485c67f70e4e23e21, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/4b699416851343b881e4a62dd0c38e02, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ee5cc8d68fae44d89aee610dd6fc3b26, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d2d62fe045f54039a7c0953b165d25fe, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/0bb6a37f22844994944c88ec2b371a9e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/18aec951166e4684b6c82b6deb80459e, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/139e317bc80740609f2cb49a7d369881, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/2786216dae304feb87343d0e788f7ad4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8bd5fee487634579b5a4ecbfdd9337a0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b70bf562412b4d5ea5bb3b37a6e12d09, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/1f3036e27f374a63b412be44f0e5d7f5] to archive 2024-12-12T05:39:22,222 DEBUG [StoreCloser-TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:39:22,224 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8fba7b57810f4d5698645b5f6862a52e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8fba7b57810f4d5698645b5f6862a52e 2024-12-12T05:39:22,224 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/34300d3d452445469390ea34cbfb4d6a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/34300d3d452445469390ea34cbfb4d6a 2024-12-12T05:39:22,224 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/83cd2c1cf4ae4a889004783760078328 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/83cd2c1cf4ae4a889004783760078328 2024-12-12T05:39:22,224 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/10b24a813186451391712b8c33ce72d0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/10b24a813186451391712b8c33ce72d0 2024-12-12T05:39:22,225 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/647d7f752992443ea8330022a41efc10 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/647d7f752992443ea8330022a41efc10 
2024-12-12T05:39:22,225 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/825d981f56644557b214efb2caa1132c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/825d981f56644557b214efb2caa1132c 2024-12-12T05:39:22,225 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/c9c0c3efe03a4cdc843bcacb955108af to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/c9c0c3efe03a4cdc843bcacb955108af 2024-12-12T05:39:22,225 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/a27bbf8f0f6a412ba848fe808ee17ebd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/a27bbf8f0f6a412ba848fe808ee17ebd 2024-12-12T05:39:22,226 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/5d85011d43ac48a69d03e33e62e0fb42 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/5d85011d43ac48a69d03e33e62e0fb42 2024-12-12T05:39:22,226 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8229038cd9fe4bd9bfd5ab45c1671635 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8229038cd9fe4bd9bfd5ab45c1671635 2024-12-12T05:39:22,226 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/731c2d75e4e441088480b4d85d825793 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/731c2d75e4e441088480b4d85d825793 2024-12-12T05:39:22,226 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b6f84380b81147f8be1250d923faa71f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b6f84380b81147f8be1250d923faa71f 2024-12-12T05:39:22,226 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e72aef580c2447d2953c12823cae62e5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e72aef580c2447d2953c12823cae62e5 2024-12-12T05:39:22,227 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04992170a7c34c679a4291aab1ff977b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04992170a7c34c679a4291aab1ff977b 2024-12-12T05:39:22,227 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/91ecb1b6a2ff43cfb444ba917a98afe9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/91ecb1b6a2ff43cfb444ba917a98afe9 2024-12-12T05:39:22,228 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ca6db3ab439e49fa85f7a3f8105b44a6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ca6db3ab439e49fa85f7a3f8105b44a6 2024-12-12T05:39:22,228 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04f6819a17694280acf4571dfec93ed4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/04f6819a17694280acf4571dfec93ed4 2024-12-12T05:39:22,228 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/3983e4d54f7443188d1f6d573f6d3b61 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/3983e4d54f7443188d1f6d573f6d3b61 2024-12-12T05:39:22,229 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8be5550c71c14de98de4767e5427e1a8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8be5550c71c14de98de4767e5427e1a8 2024-12-12T05:39:22,229 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e2475c2aa0fc4bdfa594052bb077b6f3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e2475c2aa0fc4bdfa594052bb077b6f3 2024-12-12T05:39:22,229 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/f296d6c818354c538495a152e1c84838 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/f296d6c818354c538495a152e1c84838 2024-12-12T05:39:22,229 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/48d601b59b0e4b589db18ef5fa704fa1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/48d601b59b0e4b589db18ef5fa704fa1 2024-12-12T05:39:22,230 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d8f2fee9bba349d2ba4b10fa280d1d8d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d8f2fee9bba349d2ba4b10fa280d1d8d 2024-12-12T05:39:22,230 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/45883efa055c499b901d26f1635963de to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/45883efa055c499b901d26f1635963de 2024-12-12T05:39:22,230 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/23391ef93b004fcca13c2201df2c80ba to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/23391ef93b004fcca13c2201df2c80ba 2024-12-12T05:39:22,231 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/bb52a0ac865b497485c67f70e4e23e21 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/bb52a0ac865b497485c67f70e4e23e21 2024-12-12T05:39:22,232 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d2d62fe045f54039a7c0953b165d25fe to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d2d62fe045f54039a7c0953b165d25fe 2024-12-12T05:39:22,232 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/4b699416851343b881e4a62dd0c38e02 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/4b699416851343b881e4a62dd0c38e02 2024-12-12T05:39:22,232 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ee5cc8d68fae44d89aee610dd6fc3b26 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/ee5cc8d68fae44d89aee610dd6fc3b26 2024-12-12T05:39:22,233 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/0bb6a37f22844994944c88ec2b371a9e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/0bb6a37f22844994944c88ec2b371a9e 2024-12-12T05:39:22,233 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/139e317bc80740609f2cb49a7d369881 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/139e317bc80740609f2cb49a7d369881 2024-12-12T05:39:22,233 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/18aec951166e4684b6c82b6deb80459e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/18aec951166e4684b6c82b6deb80459e 2024-12-12T05:39:22,234 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8bd5fee487634579b5a4ecbfdd9337a0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/8bd5fee487634579b5a4ecbfdd9337a0 2024-12-12T05:39:22,234 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b8ab93bce230430982cb54d673afc043 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b8ab93bce230430982cb54d673afc043 2024-12-12T05:39:22,234 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/2786216dae304feb87343d0e788f7ad4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/2786216dae304feb87343d0e788f7ad4 2024-12-12T05:39:22,234 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b70bf562412b4d5ea5bb3b37a6e12d09 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/b70bf562412b4d5ea5bb3b37a6e12d09 2024-12-12T05:39:22,234 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/1f3036e27f374a63b412be44f0e5d7f5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/1f3036e27f374a63b412be44f0e5d7f5 2024-12-12T05:39:22,235 DEBUG [StoreCloser-TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eeb6670c0c1848a9a69bbcc516b09e61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/2bd6b26e839e4882ad24a41a073973c9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/7cb0303fe8524b67be4595951fa85546, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57889e3fc4ad453cb4acff90de1035bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3b8a0ad771f14bd19ef5bd32141f1472, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/34e146b086f14b779c0cb22250031415, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/1aca729a96184f12b9cdb840b0fa0bb5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/255c1fd49ae74df4a9e05b376a49c1b1, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3f918a04391c4875a6b4a55690a15ccd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/cc5536071b414cfa97afe5b40b6afda7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a7b24f16b67a48d6a8a81a974509dc7d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac7cb0ab3c5d420ba366ceb2cd61db32, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/069b97931b5b45619d9742dac3068330, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f5a1ec1fe29749b59c386313d65fb30c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d1cb41feddbf46f382b24cac85e4b228, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eed6db4d118c4344b4fd2d949922194e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/72b5d4b80fa543b39a608cd58762d9d8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/be198ce7ef474b50a989edfda4a4c640, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/872d712e9c934546b8cfcdd27344aaf3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/4fafd35ce189427fae5094904229ead4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/bfc1ee0da470492493dfe59aa5751b2b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/591991c048ba4c2798b4536b2ad9f8bd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/16066e0d9b254ed083c982839489b69b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d47d7fbc880941dea50e1acd813c2e29, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/b2387046ad924b3ea10d5de5788f6fc1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/91a89862d18147c2a3d6629065d7f6a2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/70dabc3c4e7545bba79e8bcccb0a1843, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/58c1f7e4dd3d4a48ad79ee8be83ad217, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f3d26b9bb1b3404aacbbaa951b43119b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/de887883041c4cbbb67180ededd7744a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/67c389444a95425583abc9beb5935b7e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57b9383f3f094d939de1b4674f842b1e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/97a5c68813984d68b85bc64cc08832f3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3e20c64ef5c3420ea6af6a506a608192, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/8b23c3ee48ed44b18b40ded4dcf004e1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/507b3a9caef04fcdb615b7ededf90476, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a4e3e5fff5484f33836b667eb6e3f077] to archive 2024-12-12T05:39:22,236 DEBUG [StoreCloser-TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
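As with the A and B families, the family-C files listed above are now archived one by one; the interleaved HFileArchiver-1 through HFileArchiver-8 thread names in these records indicate the per-file moves are dispatched to a small worker pool rather than performed serially by the StoreCloser thread. The following is a rough sketch of that dispatch pattern, again using only the Hadoop FileSystem API; the pool size of 8 is read off the thread names in this log, and everything else (class names, error handling) is an assumption rather than HBase's actual code.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ParallelArchiveSketch {
  // Archives every listed store file on a fixed pool of 8 workers, mirroring the
  // HFileArchiver-1..8 threads visible in the log; each task re-roots the file from
  // <root>/data/... to <root>/archive/data/... and renames it there.
  // Illustrative sketch only; not HBase's HFileArchiver implementation.
  static void archiveAll(FileSystem fs, Path rootDir, List<Path> storeFiles) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(8);
    List<Future<?>> pending = new ArrayList<>();
    for (Path storeFile : storeFiles) {
      pending.add(pool.submit(() -> {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());
        if (!fs.rename(storeFile, archived)) {
          throw new IOException("Failed to archive " + storeFile);
        }
        return null; // Callable, so the checked IOException can surface via Future.get()
      }));
    }
    for (Future<?> f : pending) {
      f.get(); // propagate the first failure; success shows up as one "Archived ..." record per file
    }
    pool.shutdown();
  }
}
```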
2024-12-12T05:39:22,239 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eeb6670c0c1848a9a69bbcc516b09e61 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eeb6670c0c1848a9a69bbcc516b09e61 2024-12-12T05:39:22,239 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/7cb0303fe8524b67be4595951fa85546 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/7cb0303fe8524b67be4595951fa85546 2024-12-12T05:39:22,239 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/2bd6b26e839e4882ad24a41a073973c9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/2bd6b26e839e4882ad24a41a073973c9 2024-12-12T05:39:22,239 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57889e3fc4ad453cb4acff90de1035bb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57889e3fc4ad453cb4acff90de1035bb 2024-12-12T05:39:22,239 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/255c1fd49ae74df4a9e05b376a49c1b1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/255c1fd49ae74df4a9e05b376a49c1b1 2024-12-12T05:39:22,240 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/1aca729a96184f12b9cdb840b0fa0bb5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/1aca729a96184f12b9cdb840b0fa0bb5 2024-12-12T05:39:22,240 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3b8a0ad771f14bd19ef5bd32141f1472 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3b8a0ad771f14bd19ef5bd32141f1472 2024-12-12T05:39:22,240 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/34e146b086f14b779c0cb22250031415 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/34e146b086f14b779c0cb22250031415 2024-12-12T05:39:22,241 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3f918a04391c4875a6b4a55690a15ccd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3f918a04391c4875a6b4a55690a15ccd 2024-12-12T05:39:22,241 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a7b24f16b67a48d6a8a81a974509dc7d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a7b24f16b67a48d6a8a81a974509dc7d 2024-12-12T05:39:22,241 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/cc5536071b414cfa97afe5b40b6afda7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/cc5536071b414cfa97afe5b40b6afda7 2024-12-12T05:39:22,242 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/069b97931b5b45619d9742dac3068330 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/069b97931b5b45619d9742dac3068330 2024-12-12T05:39:22,243 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac7cb0ab3c5d420ba366ceb2cd61db32 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac7cb0ab3c5d420ba366ceb2cd61db32 2024-12-12T05:39:22,243 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/72b5d4b80fa543b39a608cd58762d9d8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/72b5d4b80fa543b39a608cd58762d9d8 2024-12-12T05:39:22,243 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/872d712e9c934546b8cfcdd27344aaf3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/872d712e9c934546b8cfcdd27344aaf3 2024-12-12T05:39:22,243 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f5a1ec1fe29749b59c386313d65fb30c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f5a1ec1fe29749b59c386313d65fb30c 2024-12-12T05:39:22,244 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d1cb41feddbf46f382b24cac85e4b228 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d1cb41feddbf46f382b24cac85e4b228 2024-12-12T05:39:22,244 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/be198ce7ef474b50a989edfda4a4c640 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/be198ce7ef474b50a989edfda4a4c640 2024-12-12T05:39:22,244 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eed6db4d118c4344b4fd2d949922194e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/eed6db4d118c4344b4fd2d949922194e 2024-12-12T05:39:22,244 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/4fafd35ce189427fae5094904229ead4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/4fafd35ce189427fae5094904229ead4 2024-12-12T05:39:22,245 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/16066e0d9b254ed083c982839489b69b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/16066e0d9b254ed083c982839489b69b 2024-12-12T05:39:22,245 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/bfc1ee0da470492493dfe59aa5751b2b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/bfc1ee0da470492493dfe59aa5751b2b 2024-12-12T05:39:22,246 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/591991c048ba4c2798b4536b2ad9f8bd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/591991c048ba4c2798b4536b2ad9f8bd 2024-12-12T05:39:22,246 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/91a89862d18147c2a3d6629065d7f6a2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/91a89862d18147c2a3d6629065d7f6a2 2024-12-12T05:39:22,246 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/58c1f7e4dd3d4a48ad79ee8be83ad217 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/58c1f7e4dd3d4a48ad79ee8be83ad217 2024-12-12T05:39:22,246 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/b2387046ad924b3ea10d5de5788f6fc1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/b2387046ad924b3ea10d5de5788f6fc1 2024-12-12T05:39:22,246 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/70dabc3c4e7545bba79e8bcccb0a1843 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/70dabc3c4e7545bba79e8bcccb0a1843 2024-12-12T05:39:22,247 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d47d7fbc880941dea50e1acd813c2e29 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/d47d7fbc880941dea50e1acd813c2e29 2024-12-12T05:39:22,247 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f3d26b9bb1b3404aacbbaa951b43119b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/f3d26b9bb1b3404aacbbaa951b43119b 2024-12-12T05:39:22,248 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/67c389444a95425583abc9beb5935b7e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/67c389444a95425583abc9beb5935b7e 2024-12-12T05:39:22,248 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57b9383f3f094d939de1b4674f842b1e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/57b9383f3f094d939de1b4674f842b1e 2024-12-12T05:39:22,248 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/97a5c68813984d68b85bc64cc08832f3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/97a5c68813984d68b85bc64cc08832f3 2024-12-12T05:39:22,248 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/de887883041c4cbbb67180ededd7744a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/de887883041c4cbbb67180ededd7744a 2024-12-12T05:39:22,249 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/8b23c3ee48ed44b18b40ded4dcf004e1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/8b23c3ee48ed44b18b40ded4dcf004e1 2024-12-12T05:39:22,249 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3e20c64ef5c3420ea6af6a506a608192 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/3e20c64ef5c3420ea6af6a506a608192 2024-12-12T05:39:22,249 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a4e3e5fff5484f33836b667eb6e3f077 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/a4e3e5fff5484f33836b667eb6e3f077 2024-12-12T05:39:22,249 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/507b3a9caef04fcdb615b7ededf90476 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/507b3a9caef04fcdb615b7ededf90476 2024-12-12T05:39:22,255 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/recovered.edits/566.seqid, newMaxSeqId=566, maxSeqId=1 2024-12-12T05:39:22,257 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7. 2024-12-12T05:39:22,258 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 61279763b720b7a9988338e6150d61c7: 2024-12-12T05:39:22,259 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:22,259 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=61279763b720b7a9988338e6150d61c7, regionState=CLOSED 2024-12-12T05:39:22,262 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-12T05:39:22,262 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 61279763b720b7a9988338e6150d61c7, server=83e80bf221ca,46457,1733981928566 in 1.5210 sec 2024-12-12T05:39:22,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-12T05:39:22,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=61279763b720b7a9988338e6150d61c7, UNASSIGN in 1.5250 sec 2024-12-12T05:39:22,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-12T05:39:22,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5330 sec 2024-12-12T05:39:22,267 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981962267"}]},"ts":"1733981962267"} 2024-12-12T05:39:22,268 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T05:39:22,304 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees 
to state=DISABLED 2024-12-12T05:39:22,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6100 sec 2024-12-12T05:39:22,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T05:39:22,808 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-12-12T05:39:22,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T05:39:22,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:22,826 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:22,828 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:22,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-12T05:39:22,830 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:22,835 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/recovered.edits] 2024-12-12T05:39:22,839 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/062f155651d14a7daa7eb31c540e2d10 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/062f155651d14a7daa7eb31c540e2d10 2024-12-12T05:39:22,839 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/599f28291555403498f2773c6590404e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/599f28291555403498f2773c6590404e 2024-12-12T05:39:22,839 DEBUG [HFileArchiver-4 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b199bb7ebb6f405a977ed64fb083829c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/A/b199bb7ebb6f405a977ed64fb083829c 2024-12-12T05:39:22,842 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/fbbda77c99f044b8b31d76e6ec9d2f43 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/fbbda77c99f044b8b31d76e6ec9d2f43 2024-12-12T05:39:22,842 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e6cc36140dc34e61819978c238be9ff0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/e6cc36140dc34e61819978c238be9ff0 2024-12-12T05:39:22,842 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d186234a00374b24945d60caa4911fc9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/B/d186234a00374b24945d60caa4911fc9 2024-12-12T05:39:22,845 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/04642126a5814097859c700032ebd484 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/04642126a5814097859c700032ebd484 2024-12-12T05:39:22,845 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/164d4bf5dd7340b8a57410bd988c044b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/164d4bf5dd7340b8a57410bd988c044b 2024-12-12T05:39:22,845 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac9f71599ce4475bb9f5fdcdd2d1835c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/C/ac9f71599ce4475bb9f5fdcdd2d1835c 2024-12-12T05:39:22,848 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/recovered.edits/566.seqid to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7/recovered.edits/566.seqid 2024-12-12T05:39:22,848 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/61279763b720b7a9988338e6150d61c7 2024-12-12T05:39:22,848 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T05:39:22,853 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:22,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-12T05:39:22,861 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T05:39:22,890 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T05:39:22,891 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:22,891 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T05:39:22,891 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733981962891"}]},"ts":"9223372036854775807"} 2024-12-12T05:39:22,894 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T05:39:22,894 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 61279763b720b7a9988338e6150d61c7, NAME => 'TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T05:39:22,895 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
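The entries above trace the server-side DisableTableProcedure (pid=36) and DeleteTableProcedure (pid=40) for TestAcidGuarantees: regions are closed, their store files are moved under the archive/ directory by HFileArchiver, and the table's rows are removed from hbase:meta. As a hedged, minimal sketch (not the test's actual code), the client-side calls that drive this disable-then-delete flow with the standard HBase Admin API would look roughly like this, assuming an already-configured cluster connection:

// Illustrative sketch only: the class and variable names are assumptions,
// but Admin.disableTable/deleteTable are the real client entry points that
// trigger the DisableTableProcedure/DeleteTableProcedure seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // master runs DisableTableProcedure
        admin.deleteTable(table);   // master runs DeleteTableProcedure and archives HFiles
      }
    }
  }
}

The disable step is required first: the master rejects a delete against an enabled table, and once the delete procedure finishes, the region directory contents have been relocated under archive/data/default/TestAcidGuarantees as the HFileArchiver entries above show.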
2024-12-12T05:39:22,895 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733981962895"}]},"ts":"9223372036854775807"} 2024-12-12T05:39:22,897 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T05:39:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-12T05:39:22,933 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:22,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 117 msec 2024-12-12T05:39:23,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-12T05:39:23,132 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-12-12T05:39:23,148 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=246 (was 219) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS:0;83e80bf221ca:46457-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-844571384_22 at /127.0.0.1:33246 [Waiting for operation #367] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1471900464_22 at /127.0.0.1:36768 [Waiting for operation #384] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3e8ab696-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3e8ab696-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3e8ab696-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x3e8ab696-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=257 (was 134) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=13474 (was 14045) 2024-12-12T05:39:23,148 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T05:39:23,153 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T05:39:23,159 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=246, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=257, ProcessCount=11, AvailableMemoryMB=13473 2024-12-12T05:39:23,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T05:39:23,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:39:23,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:23,163 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:39:23,163 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:23,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-12-12T05:39:23,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T05:39:23,164 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure 
table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:39:23,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741959_1135 (size=963) 2024-12-12T05:39:23,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T05:39:23,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T05:39:23,574 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:39:23,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741960_1136 (size=53) 2024-12-12T05:39:23,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T05:39:23,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:39:23,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 92b0b352d91bdb0e121a8902637d8c5e, disabling compactions & flushes 2024-12-12T05:39:23,988 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:23,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:23,989 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
after waiting 0 ms 2024-12-12T05:39:23,989 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:23,989 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:23,989 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:23,992 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:39:23,993 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733981963992"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733981963992"}]},"ts":"1733981963992"} 2024-12-12T05:39:23,996 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:39:23,997 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:39:23,998 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981963997"}]},"ts":"1733981963997"} 2024-12-12T05:39:23,999 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T05:39:24,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, ASSIGN}] 2024-12-12T05:39:24,048 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, ASSIGN 2024-12-12T05:39:24,049 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:39:24,200 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:24,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:39:24,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T05:39:24,356 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin 
connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:24,364 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:24,364 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:39:24,365 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,365 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:39:24,365 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,365 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,367 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,368 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:39:24,368 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92b0b352d91bdb0e121a8902637d8c5e columnFamilyName A 2024-12-12T05:39:24,369 DEBUG [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:24,369 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(327): Store=92b0b352d91bdb0e121a8902637d8c5e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
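The create request logged above ("Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, ...") defines three column families A, B and C with a single version each, and the TableDescriptorChecker warning shows the test deliberately using a very small memstore flush size (131072 bytes) to force frequent flushes. A minimal client-side sketch that would build a comparable descriptor follows; the table and family names and the 128 KB flush size are taken from the log, while the connection setup is generic boilerplate and not the test harness's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table attribute seen in the log: adaptive in-memory compaction for all stores.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
          // 128 KB flush size, matching the MEMSTORE_FLUSHSIZE warning above.
          .setMemStoreFlushSize(131072L);
      for (String family : new String[] { "A", "B", "C" }) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
            .build());
      }
      admin.createTable(builder.build());
    }
  }
}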
2024-12-12T05:39:24,369 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,370 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:39:24,371 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92b0b352d91bdb0e121a8902637d8c5e columnFamilyName B 2024-12-12T05:39:24,371 DEBUG [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:24,371 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(327): Store=92b0b352d91bdb0e121a8902637d8c5e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:39:24,371 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,373 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:39:24,373 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92b0b352d91bdb0e121a8902637d8c5e columnFamilyName C 2024-12-12T05:39:24,373 DEBUG [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:24,373 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(327): Store=92b0b352d91bdb0e121a8902637d8c5e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:39:24,373 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:24,374 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,374 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,376 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:39:24,377 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:24,379 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:39:24,379 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened 92b0b352d91bdb0e121a8902637d8c5e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66386828, jitterRate=-0.010759174823760986}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:39:24,380 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:24,381 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., pid=43, masterSystemTime=1733981964356 2024-12-12T05:39:24,382 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:24,382 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
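The store-open lines above ("Store=A, in-memory flush size threshold=2.00 MB, ..., compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10") show each family being backed by a CompactingMemStore as a result of the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same policy can also be requested per column family through the public descriptor API; the following is a hedged sketch of that alternative, not code taken from the test, and it assumes setting the policy family-by-family is acceptable in place of the table-wide attribute:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamilyDescriptor {
  public static ColumnFamilyDescriptor adaptiveFamily(String name) {
    // Request the ADAPTIVE in-memory compaction policy on this family only,
    // instead of setting hbase.hregion.compacting.memstore.type on the table.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .setMaxVersions(1)
        .build();
  }
}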
2024-12-12T05:39:24,382 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:24,385 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-12T05:39:24,385 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 in 181 msec 2024-12-12T05:39:24,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-12T05:39:24,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, ASSIGN in 339 msec 2024-12-12T05:39:24,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:39:24,387 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981964387"}]},"ts":"1733981964387"} 2024-12-12T05:39:24,388 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T05:39:24,396 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:39:24,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2350 sec 2024-12-12T05:39:25,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T05:39:25,275 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-12T05:39:25,281 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26401a5f to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@407e6b5c 2024-12-12T05:39:25,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb305fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:25,325 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:25,328 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:25,331 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:39:25,334 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45006, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:39:25,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T05:39:25,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:39:25,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:25,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741961_1137 (size=999) 2024-12-12T05:39:25,756 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T05:39:25,756 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T05:39:25,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:39:25,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, REOPEN/MOVE}] 2024-12-12T05:39:25,772 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, REOPEN/MOVE 2024-12-12T05:39:25,773 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:25,774 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:39:25,774 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:39:25,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:25,928 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:25,928 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:39:25,928 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing 92b0b352d91bdb0e121a8902637d8c5e, disabling compactions & flushes 2024-12-12T05:39:25,928 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:25,928 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:25,928 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. after waiting 0 ms 2024-12-12T05:39:25,928 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
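The ModifyTableProcedure stored above as pid=44 rewrites the table descriptor so that family 'A' becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the ReopenTableRegionsProcedure it spawns (pid=45/46/47) closes and reopens the region so the new descriptor takes effect. An illustrative client-side equivalent of that modification is sketched below; the table and family names come from the log, while the helper method and its Admin parameter are assumptions made for the sketch:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void enableMob(Admin admin) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    // Rebuild family 'A' with MOB enabled and a 4-byte threshold, as in the logged descriptor.
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
        .setMobEnabled(true)
        .setMobThreshold(4L)
        .build();
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build();
    // Triggers a ModifyTableProcedure followed by a region reopen, as seen above.
    admin.modifyTable(modified);
  }
}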
2024-12-12T05:39:25,938 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T05:39:25,939 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:25,939 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:25,939 WARN [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: 92b0b352d91bdb0e121a8902637d8c5e to self. 2024-12-12T05:39:25,941 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:25,942 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=CLOSED 2024-12-12T05:39:25,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-12T05:39:25,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 in 170 msec 2024-12-12T05:39:25,946 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, REOPEN/MOVE; state=CLOSED, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=true 2024-12-12T05:39:26,097 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,100 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:39:26,253 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,256 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:26,257 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:39:26,257 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,257 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:39:26,257 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,257 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,259 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,260 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:39:26,266 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92b0b352d91bdb0e121a8902637d8c5e columnFamilyName A 2024-12-12T05:39:26,269 DEBUG [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:26,270 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(327): Store=92b0b352d91bdb0e121a8902637d8c5e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:39:26,271 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,272 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:39:26,272 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92b0b352d91bdb0e121a8902637d8c5e columnFamilyName B 2024-12-12T05:39:26,272 DEBUG [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:26,272 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(327): Store=92b0b352d91bdb0e121a8902637d8c5e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:39:26,272 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,273 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:39:26,273 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92b0b352d91bdb0e121a8902637d8c5e columnFamilyName C 2024-12-12T05:39:26,273 DEBUG [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:26,273 INFO [StoreOpener-92b0b352d91bdb0e121a8902637d8c5e-1 {}] regionserver.HStore(327): Store=92b0b352d91bdb0e121a8902637d8c5e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:39:26,273 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:26,274 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,275 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,276 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:39:26,278 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,278 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened 92b0b352d91bdb0e121a8902637d8c5e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73714310, jitterRate=0.09842881560325623}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:39:26,279 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:26,280 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., pid=48, masterSystemTime=1733981966253 2024-12-12T05:39:26,281 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:26,282 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
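Once the reopen finishes, the region comes back with next sequenceid=5 and family 'A' is now served from a MOB-enabled store. A small hedged check that a client could run to confirm the modified descriptor is in place (same assumed names as the previous sketch, not part of the test itself):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyMobFamily {
  public static void check(Admin admin) throws Exception {
    ColumnFamilyDescriptor a = admin
        .getDescriptor(TableName.valueOf("TestAcidGuarantees"))
        .getColumnFamily(Bytes.toBytes("A"));
    // Expect IS_MOB => 'true' and MOB_THRESHOLD => '4' after the ModifyTableProcedure above.
    System.out.println("A isMobEnabled=" + a.isMobEnabled()
        + ", mobThreshold=" + a.getMobThreshold());
  }
}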
2024-12-12T05:39:26,282 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=OPEN, openSeqNum=5, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-12T05:39:26,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 in 183 msec 2024-12-12T05:39:26,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-12T05:39:26,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, REOPEN/MOVE in 514 msec 2024-12-12T05:39:26,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-12-12T05:39:26,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 527 msec 2024-12-12T05:39:26,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 949 msec 2024-12-12T05:39:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-12-12T05:39:26,299 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-12-12T05:39:26,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,366 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e3a4420 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ebda6ad 2024-12-12T05:39:26,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,474 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-12-12T05:39:26,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,614 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 
127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-12-12T05:39:26,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,631 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-12-12T05:39:26,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,640 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-12-12T05:39:26,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,652 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-12T05:39:26,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,664 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-12-12T05:39:26,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,673 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c1ec7ee to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63e87c8 2024-12-12T05:39:26,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a027db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:39:26,692 DEBUG 
[hconnection-0x31153c1b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:26,692 DEBUG [hconnection-0x1e22067d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,692 DEBUG [hconnection-0x3627e90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-12T05:39:26,693 DEBUG [hconnection-0x57c53fe4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,694 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45304, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,694 DEBUG [hconnection-0x623f48b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:26,694 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,695 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,695 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:26,695 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,695 DEBUG [hconnection-0x38f3bfad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,696 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:26,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:26,696 DEBUG [hconnection-0x78b8fca0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,696 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-12T05:39:26,697 DEBUG [hconnection-0x6ebb124e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,697 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,698 DEBUG [hconnection-0x553fba0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:39:26,698 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,698 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,699 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:39:26,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:26,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:39:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:26,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:26,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:26,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:26,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:26,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:26,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127f6d5ac5bc8849959546b2237067110c_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981966707/Put/seqid=0 2024-12-12T05:39:26,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741962_1138 (size=12154) 2024-12-12T05:39:26,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982026776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982026777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982026778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982026780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982026782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:26,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:26,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:26,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:26,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:26,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
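Note on the records above: the master's FlushTableProcedure (pid=49) fans out a FlushRegionProcedure (pid=50), whose FlushRegionCallable is rejected with "NOT flushing ... as already flushing" because MemStoreFlusher.0 already has the region mid-flush; the master keeps re-dispatching the callable until that flush completes. A minimal client-side sketch of the kind of request that ends up in such a procedure, assuming only the table name from the log (the class name and everything else here are a generic illustration, not part of the test harness):

    // Illustrative only: asks the master to flush the table; in this build the
    // master runs it as a FlushTableProcedure like pid=49 above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Table name taken from the log; the master dispatches the
          // per-region flush work to the region servers.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }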
2024-12-12T05:39:26,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:26,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:26,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982026884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982026884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982026884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982026885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982026893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:26,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:27,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
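Note on the Mutate rejections above: HRegion.checkResources throws RegionTooBusyException while the region's memstore is over its blocking limit and the flush is still draining it. A hedged sketch of the backoff a writer might apply in that situation; note that the real HBase client retries internally and may surface the failure wrapped (for example in a RetriesExhaustedWithDetailsException), so catching RegionTooBusyException directly here is only to illustrate the idea:

    // Hedged sketch (not from the test): back off and retry a Put when the
    // region reports it is too busy, as in the callId 7-16 rejections above.
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BackoffPut {
      /** Retries a single Put with exponential backoff; limits are illustrative. */
      static void putWithBackoff(Connection conn, Put put) throws Exception {
        long sleepMs = 100;
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              // Memstore above the blocking limit; give the flush time to drain.
              Thread.sleep(sleepMs);
              sleepMs *= 2;
            }
          }
          throw new RuntimeException("region stayed too busy after retries");
        }
      }
    }

Waiting between attempts matters here because the server cannot accept the write until the in-flight flush lowers the memstore below the blocking threshold; immediate retries only produce more of the rejections logged above.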
2024-12-12T05:39:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982027087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982027086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982027087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982027087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982027096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,155 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
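Note on the "Over memstore limit=512.0 K" figure: the per-region blocking threshold is the flush size multiplied by the block multiplier. The exact values this test configures are not shown in this excerpt, so the numbers below are an assumption; only the two configuration keys and the flush.size times block.multiplier relationship are standard HBase settings:

    // Hedged sketch of how the blocking threshold is derived from configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // e.g. a 128 KB flush size with the default multiplier of 4 would give
        // the 512 KB blocking limit seen in the RegionTooBusyException messages.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block once a region's memstore exceeds "
            + blockingLimit + " bytes");
      }
    }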
2024-12-12T05:39:27,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,163 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:27,169 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127f6d5ac5bc8849959546b2237067110c_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127f6d5ac5bc8849959546b2237067110c_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:27,170 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/124ed0f262ac4a548314be56609e0f00, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:27,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/124ed0f262ac4a548314be56609e0f00 is 175, key is test_row_0/A:col10/1733981966707/Put/seqid=0 2024-12-12T05:39:27,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741963_1139 (size=30955) 2024-12-12T05:39:27,195 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/124ed0f262ac4a548314be56609e0f00 2024-12-12T05:39:27,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/7040dd6a2d484f4ebda3feea4505f0fd is 50, key is test_row_0/B:col10/1733981966707/Put/seqid=0 2024-12-12T05:39:27,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741964_1140 (size=12001) 2024-12-12T05:39:27,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/7040dd6a2d484f4ebda3feea4505f0fd 2024-12-12T05:39:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:27,308 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,309 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:27,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
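Note on the flush output: the surrounding MemStoreFlusher.0 records show the .tmp flush files being committed into the region's A, B and C family directories (plus the MOB file under mobdir) and then added to the stores with their sequenceid and file size. A sketch for listing those committed HFiles directly from HDFS, using only paths that appear in the log; purely illustrative, not something the test itself does:

    // Hedged sketch: list the HFiles under one column-family directory of the
    // region flushed in this log. Path components are copied from the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path family = new Path("hdfs://localhost:45813/user/jenkins/test-data/"
            + "0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/"
            + "92b0b352d91bdb0e121a8902637d8c5e/A");  // family 'A' dir from the log
        FileSystem fs = family.getFileSystem(conf);
        for (FileStatus f : fs.listStatus(family)) {
          // e.g. 124ed0f262ac4a548314be56609e0f00, ~30.2 K per the flush record
          System.out.println(f.getPath().getName() + "  " + f.getLen() + " bytes");
        }
      }
    }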
2024-12-12T05:39:27,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a0a97730903e4b26885f7f1342d1d289 is 50, key is test_row_0/C:col10/1733981966707/Put/seqid=0 2024-12-12T05:39:27,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741965_1141 (size=12001) 2024-12-12T05:39:27,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a0a97730903e4b26885f7f1342d1d289 2024-12-12T05:39:27,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/124ed0f262ac4a548314be56609e0f00 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00 2024-12-12T05:39:27,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00, entries=150, sequenceid=16, filesize=30.2 K 2024-12-12T05:39:27,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/7040dd6a2d484f4ebda3feea4505f0fd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7040dd6a2d484f4ebda3feea4505f0fd 2024-12-12T05:39:27,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7040dd6a2d484f4ebda3feea4505f0fd, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:39:27,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a0a97730903e4b26885f7f1342d1d289 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a0a97730903e4b26885f7f1342d1d289 2024-12-12T05:39:27,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a0a97730903e4b26885f7f1342d1d289, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:39:27,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 92b0b352d91bdb0e121a8902637d8c5e in 661ms, sequenceid=16, compaction 
requested=false 2024-12-12T05:39:27,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:27,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:27,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:39:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:27,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982027402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982027402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982027403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982027404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982027404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212920e80c43b634c4091f35c224610c33a_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981967391/Put/seqid=0 2024-12-12T05:39:27,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741966_1142 (size=12154) 2024-12-12T05:39:27,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982027505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982027510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982027511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982027511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,615 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:27,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982027709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982027715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982027715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982027716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:27,799 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T05:39:27,834 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:27,840 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212920e80c43b634c4091f35c224610c33a_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212920e80c43b634c4091f35c224610c33a_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:27,841 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/a2460f1c746c4358b4c32a18f22570a9, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:27,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/a2460f1c746c4358b4c32a18f22570a9 is 175, key is test_row_0/A:col10/1733981967391/Put/seqid=0 2024-12-12T05:39:27,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741967_1143 (size=30955) 2024-12-12T05:39:27,865 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/a2460f1c746c4358b4c32a18f22570a9 2024-12-12T05:39:27,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/983f90661ac4459d99a1c8ff46b842aa is 50, key is test_row_0/B:col10/1733981967391/Put/seqid=0 2024-12-12T05:39:27,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43689 is added to blk_1073741968_1144 (size=12001) 2024-12-12T05:39:27,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/983f90661ac4459d99a1c8ff46b842aa 2024-12-12T05:39:27,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/c05966d100854113bdaa4bf85fbf1dec is 50, key is test_row_0/C:col10/1733981967391/Put/seqid=0 2024-12-12T05:39:27,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741969_1145 (size=12001) 2024-12-12T05:39:27,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/c05966d100854113bdaa4bf85fbf1dec 2024-12-12T05:39:27,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982027908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/a2460f1c746c4358b4c32a18f22570a9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9 2024-12-12T05:39:27,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9, entries=150, sequenceid=41, filesize=30.2 K 2024-12-12T05:39:27,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:27,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:27,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/983f90661ac4459d99a1c8ff46b842aa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa 2024-12-12T05:39:27,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:27,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T05:39:27,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/c05966d100854113bdaa4bf85fbf1dec as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c05966d100854113bdaa4bf85fbf1dec 2024-12-12T05:39:27,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c05966d100854113bdaa4bf85fbf1dec, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T05:39:27,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 92b0b352d91bdb0e121a8902637d8c5e in 548ms, sequenceid=41, compaction requested=false 2024-12-12T05:39:27,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:28,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:28,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:39:28,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:28,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:28,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:28,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212658cedca716a4df491f8b2f60155f563_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741970_1146 (size=12154) 2024-12-12T05:39:28,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982028046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982028047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,053 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:28,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982028050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982028051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,063 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212658cedca716a4df491f8b2f60155f563_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212658cedca716a4df491f8b2f60155f563_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:28,065 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/cea44cd5e9b54454aaa81b5b732b92d1, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/cea44cd5e9b54454aaa81b5b732b92d1 is 175, key is test_row_0/A:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741971_1147 (size=30955) 2024-12-12T05:39:28,075 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/cea44cd5e9b54454aaa81b5b732b92d1 2024-12-12T05:39:28,076 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:28,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:28,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:28,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:28,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:28,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
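The pid=50 failures above are the master's remote flush procedure colliding with the flush the MemStoreFlusher already has in progress ("NOT flushing ... as already flushing"); the procedure framework simply re-dispatches the callable until it succeeds. From a client's point of view the analogous operation is a single Admin.flush call; a hedged sketch, where the explicit retry loop and pause are illustrative rather than how the procedure framework actually schedules retries:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Admin.flush() flushes all regions of the table; if a region is busy
            // with an in-flight flush, wait briefly and try again instead of failing.
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    admin.flush(table);
                    break;
                } catch (IOException e) {
                    Thread.sleep(1000L);
                }
            }
        }
    }
}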
2024-12-12T05:39:28,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
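The 512.0 K blocking limit in the RegionTooBusyException records above is the region's memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, the check performed in HRegion.checkResources. The flush size this test actually configures is not shown in this excerpt; the sketch below assumes 128 KB x 4 only to reproduce the 512 K figure:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: a 128 KB per-region flush size and a block multiplier of 4
        // yield the 512 K blocking limit reported in the rejections above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long blockingLimit = flushSize * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
    }
}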
2024-12-12T05:39:28,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/6764f6cb186f4ea9b2543fe8f0a82381 is 50, key is test_row_0/B:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741972_1148 (size=12001) 2024-12-12T05:39:28,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/6764f6cb186f4ea9b2543fe8f0a82381 2024-12-12T05:39:28,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9d651d16eb8146319fd4d7b76cc925a5 is 50, key is test_row_0/C:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741973_1149 (size=12001) 2024-12-12T05:39:28,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9d651d16eb8146319fd4d7b76cc925a5 2024-12-12T05:39:28,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/cea44cd5e9b54454aaa81b5b732b92d1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1 2024-12-12T05:39:28,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1, entries=150, sequenceid=53, filesize=30.2 K 2024-12-12T05:39:28,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/6764f6cb186f4ea9b2543fe8f0a82381 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6764f6cb186f4ea9b2543fe8f0a82381 2024-12-12T05:39:28,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982028152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982028152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6764f6cb186f4ea9b2543fe8f0a82381, entries=150, sequenceid=53, filesize=11.7 K 2024-12-12T05:39:28,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9d651d16eb8146319fd4d7b76cc925a5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9d651d16eb8146319fd4d7b76cc925a5 2024-12-12T05:39:28,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982028155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982028155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9d651d16eb8146319fd4d7b76cc925a5, entries=150, sequenceid=53, filesize=11.7 K 2024-12-12T05:39:28,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 92b0b352d91bdb0e121a8902637d8c5e in 155ms, sequenceid=53, compaction requested=true 2024-12-12T05:39:28,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:28,169 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:28,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:28,171 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:28,171 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:28,171 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
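The "Over memstore limit=512.0 K" rejections above are the region refusing new writes until the in-flight flush drains the memstore; the stock HBase client already retries RegionTooBusyException internally, and the explicit loop below only makes that backoff visible. A minimal sketch, with illustrative backoff values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;
            while (true) {
                try {
                    table.put(put);          // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // give the flush time to drain the memstore
                    backoffMs = Math.min(backoffMs * 2, 5000);
                }
            }
        }
    }
}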
2024-12-12T05:39:28,171 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=90.7 K 2024-12-12T05:39:28,171 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:28,171 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1] 2024-12-12T05:39:28,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:28,171 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:28,172 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 124ed0f262ac4a548314be56609e0f00, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733981966704 2024-12-12T05:39:28,173 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:28,173 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:28,173 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
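The minor compactions being selected above were queued by the system after the flush ("compaction requested=true"); the same work can also be requested explicitly through the Admin API. A hedged sketch of asking for a compaction of the A family (the log's compactions were policy-selected, not manually requested):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Queue a compaction of the A column family; the region server's
            // compaction policy still decides which store files are actually merged.
            admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
            // admin.majorCompact(...) would instead force all files in the store to be rewritten.
        }
    }
}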
2024-12-12T05:39:28,173 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7040dd6a2d484f4ebda3feea4505f0fd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6764f6cb186f4ea9b2543fe8f0a82381] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=35.2 K 2024-12-12T05:39:28,173 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2460f1c746c4358b4c32a18f22570a9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733981966748 2024-12-12T05:39:28,174 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7040dd6a2d484f4ebda3feea4505f0fd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733981966704 2024-12-12T05:39:28,174 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting cea44cd5e9b54454aaa81b5b732b92d1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733981967394 2024-12-12T05:39:28,174 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 983f90661ac4459d99a1c8ff46b842aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733981966748 2024-12-12T05:39:28,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:28,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:28,175 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 6764f6cb186f4ea9b2543fe8f0a82381, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733981967394 2024-12-12T05:39:28,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:28,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:28,187 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:28,188 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/0088bff5b2d74fb3873e99c2fdd44fd5 is 50, key is test_row_0/B:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,192 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,200 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412125a2e94c5ff1d441d83bc0382d59b0f76_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,203 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412125a2e94c5ff1d441d83bc0382d59b0f76_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,203 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125a2e94c5ff1d441d83bc0382d59b0f76_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741974_1150 (size=12104) 2024-12-12T05:39:28,222 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/0088bff5b2d74fb3873e99c2fdd44fd5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0088bff5b2d74fb3873e99c2fdd44fd5 2024-12-12T05:39:28,231 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T05:39:28,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
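The mobdir/.tmp paths and DefaultMobStoreCompactor messages above come from family A being MOB-enabled in this test, so oversized values land in separate MOB files rather than the ordinary store files. A hedged sketch of declaring such a family (the threshold value is an assumption, not taken from this log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableDescriptor {
    public static void main(String[] args) {
        // Cells larger than the MOB threshold are written to MOB files under /mobdir
        // instead of being inlined in the A family's store files.
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100L)   // assumed threshold in bytes; the test's real value is not in this excerpt
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(mobFamily)
            .build();
        System.out.println(table);
    }
}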
2024-12-12T05:39:28,232 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:39:28,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,234 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into 0088bff5b2d74fb3873e99c2fdd44fd5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:28,235 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:28,235 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=13, startTime=1733981968171; duration=0sec 2024-12-12T05:39:28,235 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:28,235 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:28,235 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:28,237 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:28,237 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:28,237 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:28,237 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a0a97730903e4b26885f7f1342d1d289, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c05966d100854113bdaa4bf85fbf1dec, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9d651d16eb8146319fd4d7b76cc925a5] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=35.2 K 2024-12-12T05:39:28,238 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a0a97730903e4b26885f7f1342d1d289, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733981966704 2024-12-12T05:39:28,238 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c05966d100854113bdaa4bf85fbf1dec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733981966748 2024-12-12T05:39:28,239 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d651d16eb8146319fd4d7b76cc925a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733981967394 2024-12-12T05:39:28,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is 
added to blk_1073741975_1151 (size=4469) 2024-12-12T05:39:28,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212866c08560a7d4848b4089ba412c26cad_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981968042/Put/seqid=0 2024-12-12T05:39:28,273 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#132 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:28,273 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/871888ecbdd44e19a903fb950c343efc is 50, key is test_row_0/C:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741976_1152 (size=12154) 2024-12-12T05:39:28,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741977_1153 (size=12104) 2024-12-12T05:39:28,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:28,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982028365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982028366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982028367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982028370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982028473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982028473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982028474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982028474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,654 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#130 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:28,655 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/83da5e0af8e94600a9c358139e0721c2 is 175, key is test_row_0/A:col10/1733981967394/Put/seqid=0 2024-12-12T05:39:28,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741978_1154 (size=31058) 2024-12-12T05:39:28,672 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/83da5e0af8e94600a9c358139e0721c2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/83da5e0af8e94600a9c358139e0721c2 2024-12-12T05:39:28,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982028675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982028677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982028677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982028677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,686 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into 83da5e0af8e94600a9c358139e0721c2(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:28,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:28,686 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=13, startTime=1733981968169; duration=0sec 2024-12-12T05:39:28,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:28,686 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:28,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:28,709 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212866c08560a7d4848b4089ba412c26cad_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212866c08560a7d4848b4089ba412c26cad_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:28,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/df7de6f3b15942f4b2cfa8984eb76b15, store: [table=TestAcidGuarantees 
family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/df7de6f3b15942f4b2cfa8984eb76b15 is 175, key is test_row_0/A:col10/1733981968042/Put/seqid=0 2024-12-12T05:39:28,719 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/871888ecbdd44e19a903fb950c343efc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/871888ecbdd44e19a903fb950c343efc 2024-12-12T05:39:28,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741979_1155 (size=30955) 2024-12-12T05:39:28,725 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/df7de6f3b15942f4b2cfa8984eb76b15 2024-12-12T05:39:28,733 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into 871888ecbdd44e19a903fb950c343efc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
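The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit; to a client this surfaces as a retriable exception on put. The following is a minimal client-side sketch of that interaction, assuming a reachable cluster, the TestAcidGuarantees table and family A seen in the log, and illustrative backoff values; the stock HBase client normally retries this exception on its own, so the explicit loop is only there to make the behaviour visible.

// Hedged sketch: manual retry around a put that may hit RegionTooBusyException.
// Table, family, row and column come from the log above; backoff values are
// illustrative assumptions, not anything the test itself uses.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // RegionTooBusyException can surface here
          break;                            // write accepted
        } catch (RegionTooBusyException busy) {
          // Region memstore is over its blocking limit; wait for a flush to free space.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                   // simple exponential backoff
        }
      }
    }
  }
}

The rising callId values per connection in the DEBUG lines, each arriving with a fresh deadline, are consistent with exactly this kind of retry traffic from the test writers.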
2024-12-12T05:39:28,733 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:28,733 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=13, startTime=1733981968175; duration=0sec 2024-12-12T05:39:28,733 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:28,733 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:28,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/88c3e5bac7c741f4852ba866bb799405 is 50, key is test_row_0/B:col10/1733981968042/Put/seqid=0 2024-12-12T05:39:28,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741980_1156 (size=12001) 2024-12-12T05:39:28,762 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/88c3e5bac7c741f4852ba866bb799405 2024-12-12T05:39:28,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/2600738e975a41899b75711082c9f2f8 is 50, key is test_row_0/C:col10/1733981968042/Put/seqid=0 2024-12-12T05:39:28,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741981_1157 (size=12001) 2024-12-12T05:39:28,785 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/2600738e975a41899b75711082c9f2f8 2024-12-12T05:39:28,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/df7de6f3b15942f4b2cfa8984eb76b15 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15 2024-12-12T05:39:28,800 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:28,802 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15, entries=150, sequenceid=77, filesize=30.2 K 2024-12-12T05:39:28,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/88c3e5bac7c741f4852ba866bb799405 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/88c3e5bac7c741f4852ba866bb799405 2024-12-12T05:39:28,808 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/88c3e5bac7c741f4852ba866bb799405, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T05:39:28,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/2600738e975a41899b75711082c9f2f8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2600738e975a41899b75711082c9f2f8 2024-12-12T05:39:28,814 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2600738e975a41899b75711082c9f2f8, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T05:39:28,815 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 92b0b352d91bdb0e121a8902637d8c5e in 583ms, sequenceid=77, compaction requested=false 2024-12-12T05:39:28,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:28,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
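The 512.0 K figure in every "Over memstore limit" message is the region's blocking memstore size, i.e. the configured flush size multiplied by the block multiplier; a limit that small strongly suggests the test shrinks the flush size so that flushing and write-blocking are exercised within seconds. Below is a sketch of how such a limit could be configured; the property names are the standard HBase ones, but the concrete values are assumptions chosen only to reproduce the 512 K ceiling, not values read from this test.

// Hedged sketch: shrinking per-region memstore limits the way a test might,
// so that "Over memstore limit" blocking is reached quickly. Values are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are blocked once the memstore exceeds flushSize * multiplier,
    // i.e. 4 * 128 KB = 512 KB -- the limit reported in the log above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}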
2024-12-12T05:39:28,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-12T05:39:28,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-12T05:39:28,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-12T05:39:28,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1210 sec 2024-12-12T05:39:28,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.1280 sec 2024-12-12T05:39:28,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:28,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:39:28,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:28,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:28,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:28,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:28,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212075bbd470fab43c5be0139bba50a4dfd_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:28,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741982_1158 (size=14594) 2024-12-12T05:39:28,952 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:28,958 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212075bbd470fab43c5be0139bba50a4dfd_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212075bbd470fab43c5be0139bba50a4dfd_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:28,959 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/531efc3bad5248ca860b8c527b554b1e, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:28,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/531efc3bad5248ca860b8c527b554b1e is 175, key is test_row_0/A:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:28,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741983_1159 (size=39549) 2024-12-12T05:39:28,975 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/531efc3bad5248ca860b8c527b554b1e 2024-12-12T05:39:28,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/376122f885f74165ae71a83ebc05739a is 50, key is test_row_0/B:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:28,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982028984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982028986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982028990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982028990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982028991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:28,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741984_1160 (size=12001) 2024-12-12T05:39:28,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/376122f885f74165ae71a83ebc05739a 2024-12-12T05:39:29,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/50066d24316f40ab9451bb3af916461f is 50, key is test_row_0/C:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:29,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741985_1161 (size=12001) 2024-12-12T05:39:29,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/50066d24316f40ab9451bb3af916461f 2024-12-12T05:39:29,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/531efc3bad5248ca860b8c527b554b1e as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e 2024-12-12T05:39:29,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e, entries=200, sequenceid=93, filesize=38.6 K 2024-12-12T05:39:29,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/376122f885f74165ae71a83ebc05739a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/376122f885f74165ae71a83ebc05739a 2024-12-12T05:39:29,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/376122f885f74165ae71a83ebc05739a, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T05:39:29,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/50066d24316f40ab9451bb3af916461f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/50066d24316f40ab9451bb3af916461f 2024-12-12T05:39:29,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/50066d24316f40ab9451bb3af916461f, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T05:39:29,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 92b0b352d91bdb0e121a8902637d8c5e in 139ms, sequenceid=93, compaction requested=true 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:29,058 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:29,058 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:29,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:29,060 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:29,060 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:29,060 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:29,060 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0088bff5b2d74fb3873e99c2fdd44fd5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/88c3e5bac7c741f4852ba866bb799405, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/376122f885f74165ae71a83ebc05739a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=35.3 K 2024-12-12T05:39:29,061 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:29,061 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:29,061 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
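By this point each flush has left three HFiles per store, so ExploringCompactionPolicy selects all three for a minor compaction and PressureAwareThroughputController caps the rewrite (the log reports a 50.00 MB/second total limit). The sketch below lists the knobs that usually drive this behaviour; the property names are quoted from memory of HBase 2.x configuration and should be treated as assumptions, as should the values.

// Hedged sketch: configuration knobs that typically govern when minor compactions
// fire and how hard they are throttled. Names and values are assumptions for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Consider a store for minor compaction once it has at least 3 HFiles,
    // matching the "Selecting compaction from 3 store files" entries above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Block updates for the region once a single store accumulates 16 files
    // (the "16 blocking" figure in the selection log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Upper bound, in bytes/second, for the pressure-aware compaction throttle;
    // assumed property name for HBase 2.x.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
  }
}

With limits like these, a store only blocks writers once it reaches 16 files, which is why the selection entries above report "3 eligible, 16 blocking" and proceed as routine minor compactions.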
2024-12-12T05:39:29,061 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/83da5e0af8e94600a9c358139e0721c2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=99.2 K 2024-12-12T05:39:29,061 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:29,061 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/83da5e0af8e94600a9c358139e0721c2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e] 2024-12-12T05:39:29,062 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0088bff5b2d74fb3873e99c2fdd44fd5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733981967394 2024-12-12T05:39:29,062 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 88c3e5bac7c741f4852ba866bb799405, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733981968042 2024-12-12T05:39:29,062 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83da5e0af8e94600a9c358139e0721c2, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733981967394 2024-12-12T05:39:29,063 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting df7de6f3b15942f4b2cfa8984eb76b15, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733981968042 2024-12-12T05:39:29,063 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 376122f885f74165ae71a83ebc05739a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733981968363 2024-12-12T05:39:29,063 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 531efc3bad5248ca860b8c527b554b1e, keycount=200, bloomtype=ROW, size=38.6 K, 
encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733981968363 2024-12-12T05:39:29,080 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,082 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#139 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:29,082 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/dee646a6e8f74519863e49cad6ff3509 is 50, key is test_row_0/B:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:29,085 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212fe62ead2344b4fa8ac3b6a0352bad14a_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,088 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212fe62ead2344b4fa8ac3b6a0352bad14a_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,088 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212fe62ead2344b4fa8ac3b6a0352bad14a_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:29,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:39:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:29,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:29,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43689 is added to blk_1073741986_1162 (size=12207) 2024-12-12T05:39:29,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982029104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982029105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982029109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982029110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741987_1163 (size=4469) 2024-12-12T05:39:29,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f4a6308ee6d7467fa7300f472353c376_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981968986/Put/seqid=0 2024-12-12T05:39:29,134 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#138 average throughput is 0.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:29,134 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/9a0eb16fbd0a4fbc87e6f8d30e22151f is 175, key is test_row_0/A:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:29,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741988_1164 (size=31161) 2024-12-12T05:39:29,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741989_1165 (size=12154) 2024-12-12T05:39:29,171 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:29,177 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f4a6308ee6d7467fa7300f472353c376_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f4a6308ee6d7467fa7300f472353c376_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:29,178 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/22a7621d750a465482c3441f566ceb5a, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/22a7621d750a465482c3441f566ceb5a is 175, key is test_row_0/A:col10/1733981968986/Put/seqid=0 2024-12-12T05:39:29,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741990_1166 (size=30955) 2024-12-12T05:39:29,185 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/22a7621d750a465482c3441f566ceb5a 2024-12-12T05:39:29,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/0aae8439b507428fa612da89ebfa0926 is 50, key is test_row_0/B:col10/1733981968986/Put/seqid=0 2024-12-12T05:39:29,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741991_1167 (size=12001) 2024-12-12T05:39:29,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982029211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982029211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982029216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982029216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982029416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982029417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982029418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982029419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982029496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,511 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/dee646a6e8f74519863e49cad6ff3509 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/dee646a6e8f74519863e49cad6ff3509 2024-12-12T05:39:29,520 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into dee646a6e8f74519863e49cad6ff3509(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
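[Editor's note, not part of the captured log] The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting Mutate RPCs while the region's memstore is above its blocking limit (reported here as 512.0 K); writes resume once the in-progress flush drains the memstore. As a rough illustration only, that blocking threshold is derived from the memstore flush size and the block multiplier. A minimal sketch, assuming hypothetical test-sized values that would yield a 512 KB limit (the log does not show the actual configuration used by this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigSketch {
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a memstore once it reaches 128 KB (hypothetical test-sized value).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // New writes are blocked once the memstore grows past flush.size * multiplier
            // (128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" messages above).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }

The sketch only illustrates the relationship between the two settings; whether this run uses exactly those values is not recoverable from the log.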
2024-12-12T05:39:29,520 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,520 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=13, startTime=1733981969058; duration=0sec 2024-12-12T05:39:29,520 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:29,520 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:29,520 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:29,521 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:29,521 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:29,521 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:29,521 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/871888ecbdd44e19a903fb950c343efc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2600738e975a41899b75711082c9f2f8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/50066d24316f40ab9451bb3af916461f] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=35.3 K 2024-12-12T05:39:29,522 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 871888ecbdd44e19a903fb950c343efc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733981967394 2024-12-12T05:39:29,523 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2600738e975a41899b75711082c9f2f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733981968042 2024-12-12T05:39:29,523 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 50066d24316f40ab9451bb3af916461f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733981968363 2024-12-12T05:39:29,533 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
92b0b352d91bdb0e121a8902637d8c5e#C#compaction#142 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:29,533 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/45de93ab564b4cf0b769194968f5a17d is 50, key is test_row_0/C:col10/1733981968369/Put/seqid=0 2024-12-12T05:39:29,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741992_1168 (size=12207) 2024-12-12T05:39:29,557 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/45de93ab564b4cf0b769194968f5a17d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/45de93ab564b4cf0b769194968f5a17d 2024-12-12T05:39:29,568 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/9a0eb16fbd0a4fbc87e6f8d30e22151f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/9a0eb16fbd0a4fbc87e6f8d30e22151f 2024-12-12T05:39:29,568 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into 45de93ab564b4cf0b769194968f5a17d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:29,569 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,569 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=13, startTime=1733981969058; duration=0sec 2024-12-12T05:39:29,569 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:29,569 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:29,575 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into 9a0eb16fbd0a4fbc87e6f8d30e22151f(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
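[Editor's note, not part of the captured log] From a client's point of view, each rejected Mutate above surfaces as org.apache.hadoop.hbase.RegionTooBusyException, an IOException subclass that the stock HBase client normally retries internally. A stripped-down sketch, assuming a plain Java client and reusing the table, row, family, and qualifier names that appear in this log (the retry count and sleep are arbitrary illustration values):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void writeWithBackoff() throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);               // may be rejected while the memstore is over its limit
                        return;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;    // give up after a few tries
                        Thread.sleep(100L * attempt); // simple linear back-off before retrying
                    }
                }
            }
        }
    }

This only makes the server-side behavior recorded above visible from the caller's side; it is not part of the test being logged.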
2024-12-12T05:39:29,576 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,576 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=13, startTime=1733981969058; duration=0sec 2024-12-12T05:39:29,576 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:29,576 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:29,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/0aae8439b507428fa612da89ebfa0926 2024-12-12T05:39:29,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bbfdd1a9334a43838ac54b083b6a3584 is 50, key is test_row_0/C:col10/1733981968986/Put/seqid=0 2024-12-12T05:39:29,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741993_1169 (size=12001) 2024-12-12T05:39:29,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bbfdd1a9334a43838ac54b083b6a3584 2024-12-12T05:39:29,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/22a7621d750a465482c3441f566ceb5a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a 2024-12-12T05:39:29,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a, entries=150, sequenceid=117, filesize=30.2 K 2024-12-12T05:39:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/0aae8439b507428fa612da89ebfa0926 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0aae8439b507428fa612da89ebfa0926 2024-12-12T05:39:29,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0aae8439b507428fa612da89ebfa0926, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T05:39:29,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bbfdd1a9334a43838ac54b083b6a3584 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bbfdd1a9334a43838ac54b083b6a3584 2024-12-12T05:39:29,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bbfdd1a9334a43838ac54b083b6a3584, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T05:39:29,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 92b0b352d91bdb0e121a8902637d8c5e in 559ms, sequenceid=117, compaction requested=false 2024-12-12T05:39:29,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:29,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T05:39:29,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:29,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:29,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:29,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:29,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:29,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:29,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121239f58ada9324486fad7d830984c3e400_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741994_1170 (size=17184) 2024-12-12T05:39:29,746 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:29,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982029746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,753 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121239f58ada9324486fad7d830984c3e400_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121239f58ada9324486fad7d830984c3e400_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:29,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982029750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982029751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982029752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,755 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/53e0906053944099ac77fe499eb46d08, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/53e0906053944099ac77fe499eb46d08 is 175, key is test_row_0/A:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741995_1171 (size=48289) 2024-12-12T05:39:29,762 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/53e0906053944099ac77fe499eb46d08 2024-12-12T05:39:29,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/c3cb0b69ccc142fcb27bae2e18fd5ecd is 50, key is test_row_0/B:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741996_1172 (size=12101) 2024-12-12T05:39:29,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/c3cb0b69ccc142fcb27bae2e18fd5ecd 2024-12-12T05:39:29,812 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/6fbe1bb659f34696ac57c6b8240ae19a is 50, key is test_row_0/C:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741997_1173 (size=12101) 2024-12-12T05:39:29,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/6fbe1bb659f34696ac57c6b8240ae19a 2024-12-12T05:39:29,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/53e0906053944099ac77fe499eb46d08 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08 2024-12-12T05:39:29,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08, entries=250, sequenceid=135, filesize=47.2 K 2024-12-12T05:39:29,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/c3cb0b69ccc142fcb27bae2e18fd5ecd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c3cb0b69ccc142fcb27bae2e18fd5ecd 2024-12-12T05:39:29,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c3cb0b69ccc142fcb27bae2e18fd5ecd, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:39:29,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/6fbe1bb659f34696ac57c6b8240ae19a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/6fbe1bb659f34696ac57c6b8240ae19a 2024-12-12T05:39:29,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982029851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982029857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982029857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:29,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982029857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:29,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/6fbe1bb659f34696ac57c6b8240ae19a, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:39:29,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 92b0b352d91bdb0e121a8902637d8c5e in 138ms, sequenceid=135, compaction requested=true 2024-12-12T05:39:29,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,861 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:29,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:29,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:29,861 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:29,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:29,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:29,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:29,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:29,862 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:29,862 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110405 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:29,862 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:29,862 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:29,862 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:29,862 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:29,862 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/dee646a6e8f74519863e49cad6ff3509, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0aae8439b507428fa612da89ebfa0926, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c3cb0b69ccc142fcb27bae2e18fd5ecd] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=35.5 K 2024-12-12T05:39:29,863 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/9a0eb16fbd0a4fbc87e6f8d30e22151f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=107.8 K 2024-12-12T05:39:29,863 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:29,863 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/9a0eb16fbd0a4fbc87e6f8d30e22151f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08] 2024-12-12T05:39:29,863 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting dee646a6e8f74519863e49cad6ff3509, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733981968363 2024-12-12T05:39:29,863 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a0eb16fbd0a4fbc87e6f8d30e22151f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733981968363 2024-12-12T05:39:29,864 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22a7621d750a465482c3441f566ceb5a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733981968984 2024-12-12T05:39:29,864 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aae8439b507428fa612da89ebfa0926, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733981968984 2024-12-12T05:39:29,864 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c3cb0b69ccc142fcb27bae2e18fd5ecd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981969103 2024-12-12T05:39:29,864 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53e0906053944099ac77fe499eb46d08, keycount=250, bloomtype=ROW, size=47.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981969102 2024-12-12T05:39:29,870 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,871 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#147 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:29,872 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/b06b16ade0d64d90ab49d32eba2f1909 is 50, key is test_row_0/B:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,876 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121286a0a74f0cd2429186c08f6e775c47ed_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121286a0a74f0cd2429186c08f6e775c47ed_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121286a0a74f0cd2429186c08f6e775c47ed_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:29,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741998_1174 (size=12409) 2024-12-12T05:39:29,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741999_1175 (size=4469) 2024-12-12T05:39:29,898 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#148 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:29,899 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/65993ee67b2040b39a74f9ecb203aae8 is 175, key is test_row_0/A:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742000_1176 (size=31363) 2024-12-12T05:39:29,911 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/65993ee67b2040b39a74f9ecb203aae8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/65993ee67b2040b39a74f9ecb203aae8 2024-12-12T05:39:29,917 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into 65993ee67b2040b39a74f9ecb203aae8(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:29,917 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:29,917 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=13, startTime=1733981969861; duration=0sec 2024-12-12T05:39:29,917 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:29,917 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:29,917 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:29,918 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:29,919 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:29,919 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:29,920 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/45de93ab564b4cf0b769194968f5a17d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bbfdd1a9334a43838ac54b083b6a3584, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/6fbe1bb659f34696ac57c6b8240ae19a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=35.5 K 2024-12-12T05:39:29,920 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45de93ab564b4cf0b769194968f5a17d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733981968363 2024-12-12T05:39:29,921 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbfdd1a9334a43838ac54b083b6a3584, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733981968984 2024-12-12T05:39:29,921 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fbe1bb659f34696ac57c6b8240ae19a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981969103 2024-12-12T05:39:29,931 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#149 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:29,931 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/fcbc7b81b42d47dfbb2186fd707dbec6 is 50, key is test_row_0/C:col10/1733981969103/Put/seqid=0 2024-12-12T05:39:29,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742001_1177 (size=12409) 2024-12-12T05:39:30,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:30,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:39:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:30,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:30,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e220c3c506a14742b024ada4621718a3_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981969750/Put/seqid=0 2024-12-12T05:39:30,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982030069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982030069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982030073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982030075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742002_1178 (size=14794) 2024-12-12T05:39:30,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982030176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982030176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982030177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982030178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,294 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/b06b16ade0d64d90ab49d32eba2f1909 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/b06b16ade0d64d90ab49d32eba2f1909 2024-12-12T05:39:30,301 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into b06b16ade0d64d90ab49d32eba2f1909(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:30,301 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:30,301 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=13, startTime=1733981969861; duration=0sec 2024-12-12T05:39:30,301 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:30,301 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:30,350 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/fcbc7b81b42d47dfbb2186fd707dbec6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fcbc7b81b42d47dfbb2186fd707dbec6 2024-12-12T05:39:30,356 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into fcbc7b81b42d47dfbb2186fd707dbec6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:30,356 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:30,356 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=13, startTime=1733981969862; duration=0sec 2024-12-12T05:39:30,356 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:30,356 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:30,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982030378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982030380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982030381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982030382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,486 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:30,489 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e220c3c506a14742b024ada4621718a3_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e220c3c506a14742b024ada4621718a3_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:30,490 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/52230461c55a43e1ae00479c36cf8ba8, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:30,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/52230461c55a43e1ae00479c36cf8ba8 is 175, key is test_row_0/A:col10/1733981969750/Put/seqid=0 2024-12-12T05:39:30,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742003_1179 (size=39749) 2024-12-12T05:39:30,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982030503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982030681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982030685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982030686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:30,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982030686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T05:39:30,801 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-12T05:39:30,802 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:30,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-12T05:39:30,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T05:39:30,803 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:30,804 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:30,804 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:30,896 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=44.7 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/52230461c55a43e1ae00479c36cf8ba8 2024-12-12T05:39:30,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/6ed0383cc09345c391cacedfcba2818d is 50, key is test_row_0/B:col10/1733981969750/Put/seqid=0 2024-12-12T05:39:30,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T05:39:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742004_1180 (size=12151) 2024-12-12T05:39:30,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:30,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T05:39:30,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:30,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:30,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:30,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:30,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:30,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T05:39:31,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T05:39:31,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:31,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:31,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:31,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:31,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982031187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:31,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982031187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:31,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982031189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:31,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982031189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T05:39:31,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:31,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:31,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:31,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:31,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/6ed0383cc09345c391cacedfcba2818d 2024-12-12T05:39:31,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a505ca3bb82a4c36b504e636b5fb8d05 is 50, key is test_row_0/C:col10/1733981969750/Put/seqid=0 2024-12-12T05:39:31,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742005_1181 (size=12151) 2024-12-12T05:39:31,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a505ca3bb82a4c36b504e636b5fb8d05 2024-12-12T05:39:31,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/52230461c55a43e1ae00479c36cf8ba8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8 2024-12-12T05:39:31,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8, entries=200, sequenceid=159, filesize=38.8 K 2024-12-12T05:39:31,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/6ed0383cc09345c391cacedfcba2818d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6ed0383cc09345c391cacedfcba2818d 2024-12-12T05:39:31,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6ed0383cc09345c391cacedfcba2818d, entries=150, sequenceid=159, filesize=11.9 K 
2024-12-12T05:39:31,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a505ca3bb82a4c36b504e636b5fb8d05 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a505ca3bb82a4c36b504e636b5fb8d05 2024-12-12T05:39:31,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a505ca3bb82a4c36b504e636b5fb8d05, entries=150, sequenceid=159, filesize=11.9 K 2024-12-12T05:39:31,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 92b0b352d91bdb0e121a8902637d8c5e in 1286ms, sequenceid=159, compaction requested=false 2024-12-12T05:39:31,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:31,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T05:39:31,414 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:31,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T05:39:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:31,415 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:39:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:31,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:31,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ca2f9c4b05a4423888d5b6c5a827aa90_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981970067/Put/seqid=0 2024-12-12T05:39:31,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742006_1182 (size=12304) 2024-12-12T05:39:31,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,478 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ca2f9c4b05a4423888d5b6c5a827aa90_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ca2f9c4b05a4423888d5b6c5a827aa90_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:31,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/346e3938a33b496b88f6e34fbd41807b, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:31,480 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/346e3938a33b496b88f6e34fbd41807b is 175, key is test_row_0/A:col10/1733981970067/Put/seqid=0 2024-12-12T05:39:31,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742007_1183 (size=31105) 2024-12-12T05:39:31,500 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/346e3938a33b496b88f6e34fbd41807b 2024-12-12T05:39:31,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/90cc914f62bb49bf97c3d739d5af0cbc is 50, key is test_row_0/B:col10/1733981970067/Put/seqid=0 2024-12-12T05:39:31,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742008_1184 (size=12151) 2024-12-12T05:39:31,527 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/90cc914f62bb49bf97c3d739d5af0cbc 2024-12-12T05:39:31,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/02daf90390d343e993f24612f69ac4d0 is 50, key is test_row_0/C:col10/1733981970067/Put/seqid=0 2024-12-12T05:39:31,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742009_1185 (size=12151) 2024-12-12T05:39:31,542 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/02daf90390d343e993f24612f69ac4d0 2024-12-12T05:39:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/346e3938a33b496b88f6e34fbd41807b as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b 2024-12-12T05:39:31,556 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b, entries=150, sequenceid=174, filesize=30.4 K 2024-12-12T05:39:31,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/90cc914f62bb49bf97c3d739d5af0cbc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/90cc914f62bb49bf97c3d739d5af0cbc 2024-12-12T05:39:31,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,563 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/90cc914f62bb49bf97c3d739d5af0cbc, entries=150, sequenceid=174, filesize=11.9 K 2024-12-12T05:39:31,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/02daf90390d343e993f24612f69ac4d0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/02daf90390d343e993f24612f69ac4d0 2024-12-12T05:39:31,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,574 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/02daf90390d343e993f24612f69ac4d0, entries=150, sequenceid=174, filesize=11.9 K
2024-12-12T05:39:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,575 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 92b0b352d91bdb0e121a8902637d8c5e in 160ms, sequenceid=174, compaction requested=true
2024-12-12T05:39:31,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e:
2024-12-12T05:39:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.
2024-12-12T05:39:31,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52
2024-12-12T05:39:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=52
2024-12-12T05:39:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51
2024-12-12T05:39:31,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 773 msec
2024-12-12T05:39:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 778 msec
2024-12-12T05:39:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,583 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,586 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,590 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,593 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,597 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,601 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,605 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,612 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,615 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,618 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,625 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,628 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,631 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122): "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously on RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=46457) from 2024-12-12T05:39:31,631 through 2024-12-12T05:39:31,698; repeated records condensed here ...]
2024-12-12T05:39:31,698 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,701 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,704 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,707 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,711 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,714 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,719 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,723 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,726 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,728 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,731 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,735 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... duplicate DEBUG entries elided: between 2024-12-12T05:39:31,736 and 2024-12-12T05:39:31,793, RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=46457) repeatedly logged the same message, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-12T05:39:31,793 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,796 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,803 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,806 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,809 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,811 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,817 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,824 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,826 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,829 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,832 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,835 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,847 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,850 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,853 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,857 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,859 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,862 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,866 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,869 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,875 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,877 DEBUG 
2024-12-12T05:39:31,877 through 2024-12-12T05:39:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler={0,1,2},queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical DEBUG entry emitted repeatedly by handlers 0-2; duplicate entries elided)
2024-12-12T05:39:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-12T05:39:31,907 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed
2024-12-12T05:39:31,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T05:39:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees
(interleaved StoreFileTrackerFactory DEBUG entries from port 46457, 05:39:31,907-05:39:31,910, elided)
2024-12-12T05:39:31,910 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T05:39:31,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53
2024-12-12T05:39:31,911 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T05:39:31,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
(interleaved StoreFileTrackerFactory DEBUG entries from port 46457, 05:39:31,910-05:39:31,912, elided)
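The entries above capture the client-visible flow: a flush of default:TestAcidGuarantees completes as procId 51, a new flush request from the client is stored as FlushTableProcedure pid=53, and that procedure moves through FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS before spawning a FlushRegionProcedure child (pid=54, ppid=53). For reference, a minimal sketch of how such a flush is typically issued from client or test code through the public Admin API follows; the surrounding setup (configuration from hbase-site.xml, connection handling) is assumed boilerplate, not something taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Client configuration; hbase-site.xml on the classpath supplies the
    // ZooKeeper quorum and other cluster settings.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush of the table seen in the log. On the master side this
      // is driven by a FlushTableProcedure with FlushRegionProcedure children,
      // matching the pid/ppid entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The call returns once the master reports the procedure done, which appears to be what the repeated "Checking to see if procedure is done pid=..." entries on port 34751 correspond to.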
(the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry as above, repeated by handlers 0-2 on port 46457 from 2024-12-12T05:39:31,912 through 05:39:31,932; duplicate entries elided)
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:31,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:39:31,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical storefiletracker.StoreFileTrackerFactory(122) DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (port=46457) repeat through 2024-12-12T05:39:32,011 ...]
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T05:39:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-12-12T05:39:32,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54
2024-12-12T05:39:32,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.
2024-12-12T05:39:32,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e:
2024-12-12T05:39:32,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.
2024-12-12T05:39:32,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54
2024-12-12T05:39:32,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=54
2024-12-12T05:39:32,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53
2024-12-12T05:39:32,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-12-12T05:39:32,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 158 msec
2024-12-12T05:39:32,101 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,107 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,109 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,121 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,123 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,126 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,131 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,133 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeat continuously from 2024-12-12T05:39:32,133 through 2024-12-12T05:39:32,181, cycling across RpcServer.default.FPBQ.Fifo handlers 0-2 on port 46457 ...]
2024-12-12T05:39:32,181 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,183 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,186 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,189 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,194 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,196 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,201 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,203 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,206 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,208 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,210 DEBUG 
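[Editor's note] The DEBUG entries elided above all record StoreFileTrackerFactory resolving the store file tracker implementation and falling back to DefaultStoreFileTracker. The sketch below is not HBase's actual factory source; it only illustrates the general pattern of a configuration-keyed, reflection-based factory. The property key hbase.store.file-tracker.impl and every class name in the sketch are stand-ins chosen for illustration.

// Illustrative sketch only, not HBase's StoreFileTrackerFactory source: choose a
// tracker implementation class from configuration and instantiate it reflectively,
// falling back to a default when nothing is configured.
import java.lang.reflect.Constructor;
import java.util.Properties;

public final class TrackerFactorySketch {

  /** Stand-in for the real StoreFileTracker contract. */
  public interface StoreFileTracker {}

  /** Stand-in for the DefaultStoreFileTracker implementation named in the log. */
  public static final class DefaultStoreFileTracker implements StoreFileTracker {}

  // Hypothetical property key; the key HBase actually reads may differ.
  public static final String TRACKER_IMPL_KEY = "hbase.store.file-tracker.impl";

  public static StoreFileTracker create(Properties conf) throws Exception {
    String impl = conf.getProperty(TRACKER_IMPL_KEY, DefaultStoreFileTracker.class.getName());
    Constructor<?> ctor = Class.forName(impl).getDeclaredConstructor();
    return (StoreFileTracker) ctor.newInstance();
  }

  public static void main(String[] args) throws Exception {
    // With no override configured the default implementation is chosen, which is
    // what the elided DEBUG lines show happening once per store access per handler.
    System.out.println(create(new Properties()).getClass().getName());
  }
}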
[... further StoreFileTrackerFactory DEBUG duplicates (handlers 0-2, port 46457, 2024-12-12T05:39:32,210 through 05:39:32,219) interleaved with the entries below have been elided ...]
2024-12-12T05:39:32,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53
2024-12-12T05:39:32,214 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed
2024-12-12T05:39:32,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T05:39:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees
2024-12-12T05:39:32,217 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T05:39:32,218 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T05:39:32,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-12-12T05:39:32,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
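[Editor's note] The entries above show the master completing flush procedure pid=53 and then storing a new FlushTableProcedure (pid=55) with a FlushRegionProcedure child (pid=56) for TestAcidGuarantees after a client flush request; the HBaseAdmin$TableFuture line is the client-side wait for such a request. A minimal client-side sketch of issuing this kind of flush with the public HBase Admin API follows; it assumes an hbase-site.xml pointing at a reachable cluster is on the classpath.

// Minimal client-side sketch: request a table flush like the one logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master turns this request into a FlushTableProcedure with
      // FlushRegionProcedure children, as seen in the pid=55/56 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}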
[... further StoreFileTrackerFactory DEBUG duplicates (port 46457, 2024-12-12T05:39:32,223 through 05:39:32,227) interleaved with the entries below have been elided ...]
2024-12-12T05:39:32,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e
2024-12-12T05:39:32,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-12T05:39:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A
2024-12-12T05:39:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:39:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B
2024-12-12T05:39:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:39:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C
2024-12-12T05:39:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
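[Editor's note] The MemStoreFlusher entries above show a flush of region 92b0b352d91bdb0e121a8902637d8c5e covering all three column families (A, B, C), with each store's CompactingMemStore swapping its in-memory pipeline before writing to disk. CompactingMemStore is the memstore implementation used when in-memory compaction is enabled for a column family; the sketch below shows one hedged way to enable it through the public client API at table-creation time. The table and family names come from the log; the BASIC policy choice and the table-creation context are illustrative assumptions, not what the test actually configures.

// Sketch: create a table whose column families use in-memory compaction, which is
// what backs the CompactingMemStore pipeline seen in the flush entries above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class InMemoryCompactionExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // BASIC keeps recent segments in the memstore pipeline and merges
                // them in memory before the eventual flush to disk.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}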
2024-12-12T05:39:32,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127d0b28bc6eb549cbb0889e476984a044_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981972223/Put/seqid=0
2024-12-12T05:39:32,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:39:32,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982032269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982032273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982032274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982032279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
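[Note] The repeated RegionTooBusyException above comes from HRegion.checkResources: writes are rejected once the region's memstore passes its blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A small sketch of that relationship, assuming hypothetical test-sized values (128 KB x 4) that would produce the 512.0 K limit seen in the log; the settings actually used by this test run are not shown here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values; the production defaults are 128 MB and 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Puts are rejected with RegionTooBusyException once the region's memstore exceeds
        // flushSize * multiplier -- 512 KB with the values above, matching "Over memstore limit=512.0 K".
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
      }
    }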
2024-12-12T05:39:32,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742011_1187 (size=24758)
2024-12-12T05:39:32,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-12-12T05:39:32,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56
2024-12-12T05:39:32,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.
2024-12-12T05:39:32,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing
2024-12-12T05:39:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.
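[Note] The FlushRegionCallable dispatched for pid=56 finds the region already flushing (the MemStoreFlusher got there first), so it fails below with an IOException and the master re-dispatches it; meanwhile writers keep hitting RegionTooBusyException (seen again at 05:39:32,376 onward) until the flush drains the memstore. The HBase client normally retries RegionTooBusyException internally; the sketch below is only an application-level backoff loop, assuming the exception is surfaced directly (for example with client retries disabled), with the table name taken from this test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BackoffPutSketch {
      // Retries a put with exponential backoff while the region reports it is too busy.
      static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
          try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // The memstore is over its blocking limit (as in the log); wait for the flush to drain it.
            Thread.sleep(sleepMs);
            sleepMs *= 2;
          }
        }
        throw new IOException("region still too busy after retries");
      }
    }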
2024-12-12T05:39:32,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56
java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''}
2024-12-12T05:39:32,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:39:32,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982032375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982032379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982032379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:39:32,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982032383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T05:39:32,523 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:32,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982032522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,526 DEBUG [Thread-678 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:39:32,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982032577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982032583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982032584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982032585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,675 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,706 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:32,715 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127d0b28bc6eb549cbb0889e476984a044_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127d0b28bc6eb549cbb0889e476984a044_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:32,716 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0857c13656d84536a389deb8cdc03f5a, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:32,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0857c13656d84536a389deb8cdc03f5a is 175, key is test_row_0/A:col10/1733981972223/Put/seqid=0 2024-12-12T05:39:32,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742010_1186 (size=74395) 2024-12-12T05:39:32,720 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=186, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0857c13656d84536a389deb8cdc03f5a 2024-12-12T05:39:32,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/a220144071cc4454a789131edd4ee8b1 is 50, key is test_row_0/B:col10/1733981972223/Put/seqid=0 2024-12-12T05:39:32,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742012_1188 (size=12151) 2024-12-12T05:39:32,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T05:39:32,828 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New 
admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:32,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:32,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:32,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982032880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982032887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982032889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982032889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,980 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:32,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:32,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,133 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/a220144071cc4454a789131edd4ee8b1 2024-12-12T05:39:33,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:33,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:33,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:33,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/5f8a12a01b07421f8048db32c07e0e54 is 50, key is test_row_0/C:col10/1733981972223/Put/seqid=0 2024-12-12T05:39:33,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742013_1189 (size=12151) 2024-12-12T05:39:33,286 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:33,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T05:39:33,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:33,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982033387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:33,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982033390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982033393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982033393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:33,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:33,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:33,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/5f8a12a01b07421f8048db32c07e0e54 2024-12-12T05:39:33,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0857c13656d84536a389deb8cdc03f5a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a 2024-12-12T05:39:33,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a, entries=400, sequenceid=186, filesize=72.7 K 2024-12-12T05:39:33,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/a220144071cc4454a789131edd4ee8b1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/a220144071cc4454a789131edd4ee8b1 2024-12-12T05:39:33,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/a220144071cc4454a789131edd4ee8b1, entries=150, sequenceid=186, filesize=11.9 K 2024-12-12T05:39:33,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/5f8a12a01b07421f8048db32c07e0e54 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/5f8a12a01b07421f8048db32c07e0e54 2024-12-12T05:39:33,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/5f8a12a01b07421f8048db32c07e0e54, entries=150, sequenceid=186, filesize=11.9 K 2024-12-12T05:39:33,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 92b0b352d91bdb0e121a8902637d8c5e in 1350ms, sequenceid=186, compaction requested=true 2024-12-12T05:39:33,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:33,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:33,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:33,575 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:33,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:33,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:33,576 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:33,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:33,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:33,577 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 176612 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:33,577 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:33,577 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:33,577 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/65993ee67b2040b39a74f9ecb203aae8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=172.5 K 2024-12-12T05:39:33,577 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:33,578 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/65993ee67b2040b39a74f9ecb203aae8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a] 2024-12-12T05:39:33,578 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:33,578 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:33,578 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65993ee67b2040b39a74f9ecb203aae8, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981969103 2024-12-12T05:39:33,578 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:33,578 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/b06b16ade0d64d90ab49d32eba2f1909, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6ed0383cc09345c391cacedfcba2818d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/90cc914f62bb49bf97c3d739d5af0cbc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/a220144071cc4454a789131edd4ee8b1] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=47.7 K 2024-12-12T05:39:33,579 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52230461c55a43e1ae00479c36cf8ba8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733981969748 2024-12-12T05:39:33,579 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b06b16ade0d64d90ab49d32eba2f1909, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981969103 2024-12-12T05:39:33,579 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 346e3938a33b496b88f6e34fbd41807b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733981970067 2024-12-12T05:39:33,579 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed0383cc09345c391cacedfcba2818d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733981969748 2024-12-12T05:39:33,579 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0857c13656d84536a389deb8cdc03f5a, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733981972213 2024-12-12T05:39:33,580 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 90cc914f62bb49bf97c3d739d5af0cbc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733981970067 2024-12-12T05:39:33,581 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a220144071cc4454a789131edd4ee8b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733981972222 2024-12-12T05:39:33,592 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:33,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T05:39:33,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:33,593 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:39:33,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:33,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:33,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:33,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:33,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:33,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:33,608 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:33,618 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#160 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:33,618 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/e54091c9b7124168aaa95321958ad346 is 50, key is test_row_0/B:col10/1733981972223/Put/seqid=0 2024-12-12T05:39:33,628 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212da1f722d7a854659a0cec412f9407e8a_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:33,632 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212da1f722d7a854659a0cec412f9407e8a_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:33,632 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212da1f722d7a854659a0cec412f9407e8a_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:33,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d4eb1dc6bec64c4cb4d99117495515fb_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981972254/Put/seqid=0 2024-12-12T05:39:33,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742014_1190 (size=12595) 2024-12-12T05:39:33,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742015_1191 (size=4469) 2024-12-12T05:39:33,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742016_1192 (size=12304) 2024-12-12T05:39:33,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:33,673 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d4eb1dc6bec64c4cb4d99117495515fb_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d4eb1dc6bec64c4cb4d99117495515fb_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 
{event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/fc02a13dbb0e4e37928f32159dd238f9, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:33,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/fc02a13dbb0e4e37928f32159dd238f9 is 175, key is test_row_0/A:col10/1733981972254/Put/seqid=0 2024-12-12T05:39:33,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742017_1193 (size=31105) 2024-12-12T05:39:34,052 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#159 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:34,053 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/29b67554d4d04fe0bec43e64b1209a51 is 175, key is test_row_0/A:col10/1733981972223/Put/seqid=0 2024-12-12T05:39:34,054 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/e54091c9b7124168aaa95321958ad346 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e54091c9b7124168aaa95321958ad346 2024-12-12T05:39:34,061 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into e54091c9b7124168aaa95321958ad346(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:34,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:34,061 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=12, startTime=1733981973576; duration=0sec 2024-12-12T05:39:34,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:34,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:34,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:34,063 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:34,063 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:34,063 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:34,063 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fcbc7b81b42d47dfbb2186fd707dbec6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a505ca3bb82a4c36b504e636b5fb8d05, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/02daf90390d343e993f24612f69ac4d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/5f8a12a01b07421f8048db32c07e0e54] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=47.7 K 2024-12-12T05:39:34,064 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting fcbc7b81b42d47dfbb2186fd707dbec6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733981969103 2024-12-12T05:39:34,064 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a505ca3bb82a4c36b504e636b5fb8d05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733981969748 2024-12-12T05:39:34,065 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 02daf90390d343e993f24612f69ac4d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=174, earliestPutTs=1733981970067 2024-12-12T05:39:34,065 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f8a12a01b07421f8048db32c07e0e54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733981972222 2024-12-12T05:39:34,082 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/fc02a13dbb0e4e37928f32159dd238f9 2024-12-12T05:39:34,082 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:34,083 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/1a2c2f419e934211a521b0015da8eadb is 50, key is test_row_0/C:col10/1733981972223/Put/seqid=0 2024-12-12T05:39:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742018_1194 (size=31549) 2024-12-12T05:39:34,099 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/29b67554d4d04fe0bec43e64b1209a51 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/29b67554d4d04fe0bec43e64b1209a51 2024-12-12T05:39:34,105 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into 29b67554d4d04fe0bec43e64b1209a51(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:34,105 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:34,106 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=12, startTime=1733981973575; duration=0sec 2024-12-12T05:39:34,106 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:34,106 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:34,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/e97b8ef614d445318746450c7dc1ff5d is 50, key is test_row_0/B:col10/1733981972254/Put/seqid=0 2024-12-12T05:39:34,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742019_1195 (size=12595) 2024-12-12T05:39:34,125 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/1a2c2f419e934211a521b0015da8eadb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/1a2c2f419e934211a521b0015da8eadb 2024-12-12T05:39:34,132 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into 1a2c2f419e934211a521b0015da8eadb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:34,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:34,132 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=12, startTime=1733981973576; duration=0sec 2024-12-12T05:39:34,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:34,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:34,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742020_1196 (size=12151) 2024-12-12T05:39:34,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T05:39:34,350 INFO [master/83e80bf221ca:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-12T05:39:34,350 INFO [master/83e80bf221ca:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-12T05:39:34,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:34,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:34,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982034403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982034403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982034404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982034405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982034506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982034506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982034506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,547 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/e97b8ef614d445318746450c7dc1ff5d 2024-12-12T05:39:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/2a98d6cb9a3044ab8f8152f0d5af484d is 50, key is test_row_0/C:col10/1733981972254/Put/seqid=0 2024-12-12T05:39:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742021_1197 (size=12151) 2024-12-12T05:39:34,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982034707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982034707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:34,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982034707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:34,960 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/2a98d6cb9a3044ab8f8152f0d5af484d 2024-12-12T05:39:34,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/fc02a13dbb0e4e37928f32159dd238f9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9 2024-12-12T05:39:34,969 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9, entries=150, sequenceid=211, filesize=30.4 K 2024-12-12T05:39:34,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/e97b8ef614d445318746450c7dc1ff5d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e97b8ef614d445318746450c7dc1ff5d 2024-12-12T05:39:34,973 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e97b8ef614d445318746450c7dc1ff5d, entries=150, sequenceid=211, filesize=11.9 K 2024-12-12T05:39:34,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/2a98d6cb9a3044ab8f8152f0d5af484d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2a98d6cb9a3044ab8f8152f0d5af484d 2024-12-12T05:39:34,979 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2a98d6cb9a3044ab8f8152f0d5af484d, entries=150, sequenceid=211, filesize=11.9 K 2024-12-12T05:39:34,979 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 92b0b352d91bdb0e121a8902637d8c5e in 1386ms, sequenceid=211, compaction requested=false 2024-12-12T05:39:34,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:34,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
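Note: the repeated RegionTooBusyException warnings above come from HRegion.checkResources(): writes are rejected once the region's memstore exceeds its blocking threshold (here 512.0 K), which is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test apparently runs with a deliberately small flush size, so the limit is hit constantly until the flush above drains the memstore. A caller issuing raw Table.put() calls has to back off and retry, roughly as in the hedged sketch below (table, row, and family names are taken from the log; the retry policy is an illustrative assumption; only standard hbase-client calls are used).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 50; // illustrative starting backoff, not a recommendation
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException busy) {
          // A RegionTooBusyException (possibly wrapped by the client's own retry
          // machinery) means the memstore is over its blocking limit; back off so
          // the in-flight flush can drain it, then try again.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 2000);
        }
      }
    }
  }
}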
2024-12-12T05:39:34,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-12T05:39:34,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-12T05:39:34,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-12T05:39:34,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7630 sec 2024-12-12T05:39:34,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.7660 sec 2024-12-12T05:39:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:35,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:39:35,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:35,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:35,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:35,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:35,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:35,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:35,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f958f8eb4584d13bffdbefc8e26d191_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:35,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742022_1198 (size=14794) 2024-12-12T05:39:35,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982035031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982035032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982035034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982035135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982035135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982035136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982035337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982035338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982035339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,423 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:35,427 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f958f8eb4584d13bffdbefc8e26d191_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f958f8eb4584d13bffdbefc8e26d191_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:35,428 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/33d8ef679c12427eb2095af284549348, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:35,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/33d8ef679c12427eb2095af284549348 is 175, key is test_row_0/A:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:35,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742023_1199 (size=39749) 2024-12-12T05:39:35,434 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=227, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/33d8ef679c12427eb2095af284549348 2024-12-12T05:39:35,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/8e78d6cc220b4ce3a3e5bf8230d46f82 is 50, key is test_row_0/B:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:35,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742024_1200 
(size=12151) 2024-12-12T05:39:35,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982035640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982035642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982035642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:35,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/8e78d6cc220b4ce3a3e5bf8230d46f82 2024-12-12T05:39:35,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9659928f870f430dadb91d5cde9ce6df is 50, key is test_row_0/C:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742025_1201 (size=12151) 2024-12-12T05:39:36,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982036144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982036146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982036147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9659928f870f430dadb91d5cde9ce6df 2024-12-12T05:39:36,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/33d8ef679c12427eb2095af284549348 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348 2024-12-12T05:39:36,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348, entries=200, sequenceid=227, filesize=38.8 K 2024-12-12T05:39:36,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/8e78d6cc220b4ce3a3e5bf8230d46f82 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8e78d6cc220b4ce3a3e5bf8230d46f82 2024-12-12T05:39:36,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8e78d6cc220b4ce3a3e5bf8230d46f82, entries=150, sequenceid=227, filesize=11.9 K 2024-12-12T05:39:36,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9659928f870f430dadb91d5cde9ce6df as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9659928f870f430dadb91d5cde9ce6df 2024-12-12T05:39:36,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9659928f870f430dadb91d5cde9ce6df, entries=150, sequenceid=227, filesize=11.9 K 2024-12-12T05:39:36,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 92b0b352d91bdb0e121a8902637d8c5e in 1268ms, sequenceid=227, compaction requested=true 2024-12-12T05:39:36,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:36,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:36,279 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:36,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:36,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:36,280 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:36,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:36,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:36,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:36,281 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:36,281 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:36,281 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:36,281 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e54091c9b7124168aaa95321958ad346, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e97b8ef614d445318746450c7dc1ff5d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8e78d6cc220b4ce3a3e5bf8230d46f82] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=36.0 K 2024-12-12T05:39:36,281 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:36,281 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:36,281 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,281 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/29b67554d4d04fe0bec43e64b1209a51, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=100.0 K 2024-12-12T05:39:36,281 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,281 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/29b67554d4d04fe0bec43e64b1209a51, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348] 2024-12-12T05:39:36,282 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e54091c9b7124168aaa95321958ad346, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733981972222 2024-12-12T05:39:36,282 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29b67554d4d04fe0bec43e64b1209a51, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733981972222 2024-12-12T05:39:36,282 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e97b8ef614d445318746450c7dc1ff5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733981972254 2024-12-12T05:39:36,283 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc02a13dbb0e4e37928f32159dd238f9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733981972254 2024-12-12T05:39:36,283 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e78d6cc220b4ce3a3e5bf8230d46f82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733981974397 2024-12-12T05:39:36,284 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33d8ef679c12427eb2095af284549348, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733981974397 2024-12-12T05:39:36,305 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#168 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:36,306 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/8c233e83d965402a821a0852f9ebd97f is 50, key is test_row_0/B:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:36,307 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:36,310 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121250e053bc6e0048f49eab88c8c16d503a_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:36,311 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121250e053bc6e0048f49eab88c8c16d503a_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:36,312 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121250e053bc6e0048f49eab88c8c16d503a_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:36,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T05:39:36,323 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-12T05:39:36,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:36,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-12T05:39:36,326 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:36,327 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:36,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:36,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:36,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742026_1202 (size=12697) 2024-12-12T05:39:36,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742027_1203 (size=4469) 2024-12-12T05:39:36,333 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#169 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:36,334 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/db3b66191e22418db4a0fba303675982 is 175, key is test_row_0/A:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:36,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742028_1204 (size=31651) 2024-12-12T05:39:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:36,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:39:36,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:36,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:36,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:36,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:36,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:36,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:36,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212734deb8c84284ab1931357771c125a9c_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981975033/Put/seqid=0 2024-12-12T05:39:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742029_1205 (size=14794) 2024-12-12T05:39:36,432 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:36,436 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming 
flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212734deb8c84284ab1931357771c125a9c_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212734deb8c84284ab1931357771c125a9c_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:36,438 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/3188d9498ee54189bc954b362fb48b03, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:36,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/3188d9498ee54189bc954b362fb48b03 is 175, key is test_row_0/A:col10/1733981975033/Put/seqid=0 2024-12-12T05:39:36,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742030_1206 (size=39749) 2024-12-12T05:39:36,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982036445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:36,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:36,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:36,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982036547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982036554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,556 DEBUG [Thread-678 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:39:36,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:36,631 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:36,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 
{event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:36,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:36,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:36,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:36,734 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/8c233e83d965402a821a0852f9ebd97f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8c233e83d965402a821a0852f9ebd97f 2024-12-12T05:39:36,739 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into 8c233e83d965402a821a0852f9ebd97f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:36,739 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:36,739 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=13, startTime=1733981976279; duration=0sec 2024-12-12T05:39:36,740 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:36,740 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:36,740 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:36,741 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:36,741 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:36,741 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,742 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/1a2c2f419e934211a521b0015da8eadb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2a98d6cb9a3044ab8f8152f0d5af484d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9659928f870f430dadb91d5cde9ce6df] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=36.0 K 2024-12-12T05:39:36,743 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a2c2f419e934211a521b0015da8eadb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1733981972222 2024-12-12T05:39:36,744 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/db3b66191e22418db4a0fba303675982 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/db3b66191e22418db4a0fba303675982 2024-12-12T05:39:36,744 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a98d6cb9a3044ab8f8152f0d5af484d, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733981972254 2024-12-12T05:39:36,744 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 9659928f870f430dadb91d5cde9ce6df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733981974397 2024-12-12T05:39:36,748 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into db3b66191e22418db4a0fba303675982(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:36,748 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:36,749 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=13, startTime=1733981976279; duration=0sec 2024-12-12T05:39:36,749 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:36,749 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:36,751 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:36,751 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/17c97b56a81541cdb66dd5f7db4f7743 is 50, key is test_row_0/C:col10/1733981974397/Put/seqid=0 2024-12-12T05:39:36,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:36,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982036752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742031_1207 (size=12697) 2024-12-12T05:39:36,760 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/17c97b56a81541cdb66dd5f7db4f7743 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/17c97b56a81541cdb66dd5f7db4f7743 2024-12-12T05:39:36,768 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into 17c97b56a81541cdb66dd5f7db4f7743(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:36,768 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:36,768 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=13, startTime=1733981976280; duration=0sec 2024-12-12T05:39:36,768 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:36,768 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:36,785 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:36,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:36,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:36,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:36,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:36,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:36,846 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/3188d9498ee54189bc954b362fb48b03 2024-12-12T05:39:36,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/f735b93078cf4a629dcf5bd835c24389 is 50, key is test_row_0/B:col10/1733981975033/Put/seqid=0 2024-12-12T05:39:36,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742032_1208 (size=12151) 2024-12-12T05:39:36,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:36,937 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:36,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:36,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:36,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:36,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:36,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:36,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982037056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:37,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:37,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:37,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:37,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982037147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982037152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982037160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,242 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:37,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:37,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:37,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:37,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:37,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/f735b93078cf4a629dcf5bd835c24389 2024-12-12T05:39:37,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bca5efa7719d47f4b65e0db2a0b8d209 is 50, key is test_row_0/C:col10/1733981975033/Put/seqid=0 2024-12-12T05:39:37,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742033_1209 (size=12151) 2024-12-12T05:39:37,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bca5efa7719d47f4b65e0db2a0b8d209 2024-12-12T05:39:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/3188d9498ee54189bc954b362fb48b03 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03 2024-12-12T05:39:37,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03, entries=200, sequenceid=251, filesize=38.8 K 2024-12-12T05:39:37,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/f735b93078cf4a629dcf5bd835c24389 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f735b93078cf4a629dcf5bd835c24389 2024-12-12T05:39:37,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f735b93078cf4a629dcf5bd835c24389, entries=150, sequenceid=251, filesize=11.9 K 
2024-12-12T05:39:37,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bca5efa7719d47f4b65e0db2a0b8d209 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bca5efa7719d47f4b65e0db2a0b8d209 2024-12-12T05:39:37,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bca5efa7719d47f4b65e0db2a0b8d209, entries=150, sequenceid=251, filesize=11.9 K 2024-12-12T05:39:37,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 92b0b352d91bdb0e121a8902637d8c5e in 875ms, sequenceid=251, compaction requested=false 2024-12-12T05:39:37,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:37,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T05:39:37,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:37,395 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:39:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:37,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:37,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123753626ead564870bd2ee12312fa554d_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981976432/Put/seqid=0 2024-12-12T05:39:37,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742034_1210 (size=12454) 2024-12-12T05:39:37,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:37,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:37,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:37,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982037602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982037707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:37,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:37,820 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123753626ead564870bd2ee12312fa554d_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123753626ead564870bd2ee12312fa554d_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/187c3e9cc61345dfbe74bca197c3b245, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/187c3e9cc61345dfbe74bca197c3b245 is 175, key is test_row_0/A:col10/1733981976432/Put/seqid=0 2024-12-12T05:39:37,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742035_1211 (size=31255) 2024-12-12T05:39:37,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982037909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:38,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:38,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982038212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:38,226 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=266, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/187c3e9cc61345dfbe74bca197c3b245 2024-12-12T05:39:38,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/62729d59b2804d43b581b0fdedcf2f6d is 50, key is test_row_0/B:col10/1733981976432/Put/seqid=0 2024-12-12T05:39:38,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742036_1212 (size=12301) 2024-12-12T05:39:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:38,636 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/62729d59b2804d43b581b0fdedcf2f6d 2024-12-12T05:39:38,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/8f82271716384adeb15abe0f276a8d75 is 50, key is test_row_0/C:col10/1733981976432/Put/seqid=0 2024-12-12T05:39:38,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742037_1213 (size=12301) 2024-12-12T05:39:38,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982038718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,047 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/8f82271716384adeb15abe0f276a8d75 2024-12-12T05:39:39,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/187c3e9cc61345dfbe74bca197c3b245 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245 2024-12-12T05:39:39,059 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245, entries=150, sequenceid=266, filesize=30.5 K 2024-12-12T05:39:39,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/62729d59b2804d43b581b0fdedcf2f6d as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/62729d59b2804d43b581b0fdedcf2f6d 2024-12-12T05:39:39,064 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/62729d59b2804d43b581b0fdedcf2f6d, entries=150, sequenceid=266, filesize=12.0 K 2024-12-12T05:39:39,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/8f82271716384adeb15abe0f276a8d75 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/8f82271716384adeb15abe0f276a8d75 2024-12-12T05:39:39,068 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/8f82271716384adeb15abe0f276a8d75, entries=150, sequenceid=266, filesize=12.0 K 2024-12-12T05:39:39,069 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 92b0b352d91bdb0e121a8902637d8c5e in 1674ms, sequenceid=266, compaction requested=true 2024-12-12T05:39:39,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:39,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:39,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-12T05:39:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-12T05:39:39,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-12T05:39:39,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7430 sec 2024-12-12T05:39:39,073 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.7480 sec 2024-12-12T05:39:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:39,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:39:39,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:39,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:39,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:39,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:39,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:39,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:39,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121228d5a97220dd4f81aa97dacf3a5fefc3_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:39,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742038_1214 (size=14994) 2024-12-12T05:39:39,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982039169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982039170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982039170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982039272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982039272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982039273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982039474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982039475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982039477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,569 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:39,607 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121228d5a97220dd4f81aa97dacf3a5fefc3_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121228d5a97220dd4f81aa97dacf3a5fefc3_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:39,608 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/936b559d09b04b86ba9332cb01a0c5b7, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:39,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/936b559d09b04b86ba9332cb01a0c5b7 is 175, key is test_row_0/A:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:39,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742039_1215 (size=39949) 2024-12-12T05:39:39,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982039725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982039775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982039779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:39,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:39,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982039780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,014 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/936b559d09b04b86ba9332cb01a0c5b7 2024-12-12T05:39:40,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/86baa96a300e4084ab462ce48caf5883 is 50, key is test_row_0/B:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:40,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742040_1216 (size=12301) 2024-12-12T05:39:40,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982040279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982040282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982040283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/86baa96a300e4084ab462ce48caf5883 2024-12-12T05:39:40,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T05:39:40,431 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-12T05:39:40,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:40,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-12T05:39:40,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:40,434 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:40,434 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:40,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:40,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9296bb8f169749a3ae1fd2e46c345f1b is 50, key is test_row_0/C:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:40,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742041_1217 (size=12301) 2024-12-12T05:39:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:40,585 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T05:39:40,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:40,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:40,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
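The repeated RegionTooBusyException warnings earlier in this run come from HRegion.checkResources(): writes are rejected while the region's memstore is above its blocking limit (reported here as 512.0 K, i.e. the configured flush size times the block multiplier) until the in-progress flush drains it. A minimal client-side sketch of tolerating that backpressure, assuming the standard HBase 2.x client API; the table, row, and column names are taken from the log, while the value, retry count, and backoff times are arbitrary:

```java
// Illustrative only: retry a put while the region reports memstore backpressure.
// Depending on hbase.client.retries.number, the server-side exception may instead
// arrive wrapped in a retries-exhausted exception; retries are limited here so it
// tends to surface directly.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 1); // limit automatic retries for this sketch
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore over its blocking limit: give the flush time to complete, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```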
2024-12-12T05:39:40,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:40,738 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T05:39:40,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:40,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:40,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:40,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:40,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9296bb8f169749a3ae1fd2e46c345f1b 2024-12-12T05:39:40,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/936b559d09b04b86ba9332cb01a0c5b7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7 2024-12-12T05:39:40,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7, entries=200, sequenceid=291, filesize=39.0 K 2024-12-12T05:39:40,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/86baa96a300e4084ab462ce48caf5883 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/86baa96a300e4084ab462ce48caf5883 2024-12-12T05:39:40,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/86baa96a300e4084ab462ce48caf5883, entries=150, sequenceid=291, filesize=12.0 K 2024-12-12T05:39:40,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/9296bb8f169749a3ae1fd2e46c345f1b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9296bb8f169749a3ae1fd2e46c345f1b 2024-12-12T05:39:40,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9296bb8f169749a3ae1fd2e46c345f1b, entries=150, sequenceid=291, filesize=12.0 K 2024-12-12T05:39:40,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 92b0b352d91bdb0e121a8902637d8c5e in 1700ms, sequenceid=291, compaction requested=true 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:40,858 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:40,858 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:40,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142604 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:40,860 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,860 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
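The selection messages above ("4 store files, 0 compacting, 4 eligible, 16 blocking", ExploringCompactionPolicy choosing all 4 files) are governed by the standard compaction-selection settings. A small sketch of the relevant knobs, set programmatically; the values shown are the usual defaults, not values read from this run:

```java
// Illustrative only: compaction-selection settings that shape decisions like the
// ones logged above. In practice these usually live in hbase-site.xml.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3);   // min store files before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);        // max store files per minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);    // writes block above this count (the "16 blocking" above)
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // size ratio used by ExploringCompactionPolicy
    System.out.println("blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}
```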
2024-12-12T05:39:40,860 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/db3b66191e22418db4a0fba303675982, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=139.3 K 2024-12-12T05:39:40,860 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8c233e83d965402a821a0852f9ebd97f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f735b93078cf4a629dcf5bd835c24389, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/62729d59b2804d43b581b0fdedcf2f6d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/86baa96a300e4084ab462ce48caf5883] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=48.3 K 2024-12-12T05:39:40,860 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/db3b66191e22418db4a0fba303675982, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7] 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting db3b66191e22418db4a0fba303675982, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733981974397 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c233e83d965402a821a0852f9ebd97f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733981974397 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f735b93078cf4a629dcf5bd835c24389, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733981975028 2024-12-12T05:39:40,860 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3188d9498ee54189bc954b362fb48b03, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733981975028 2024-12-12T05:39:40,861 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 62729d59b2804d43b581b0fdedcf2f6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733981976432 2024-12-12T05:39:40,861 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 187c3e9cc61345dfbe74bca197c3b245, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733981976432 2024-12-12T05:39:40,861 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 86baa96a300e4084ab462ce48caf5883, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579 2024-12-12T05:39:40,861 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 936b559d09b04b86ba9332cb01a0c5b7, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579 2024-12-12T05:39:40,868 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:40,868 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:40,868 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/91cdcc6fc2b245adbbab47d8efb1b442 is 50, key is test_row_0/B:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:40,870 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212a9f3412386f24ddb98219e7dd5ea57a8_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:40,873 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212a9f3412386f24ddb98219e7dd5ea57a8_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:40,873 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a9f3412386f24ddb98219e7dd5ea57a8_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:40,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742042_1218 (size=12983) 2024-12-12T05:39:40,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742043_1219 (size=4469) 2024-12-12T05:39:40,886 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/91cdcc6fc2b245adbbab47d8efb1b442 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/91cdcc6fc2b245adbbab47d8efb1b442 2024-12-12T05:39:40,886 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#181 average throughput is 1.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:40,887 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/d9c5dd04de0a49799872e1848f0f930e is 175, key is test_row_0/A:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:40,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:40,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T05:39:40,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,891 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:39:40,891 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into 91cdcc6fc2b245adbbab47d8efb1b442(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:40,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:40,891 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:40,891 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=12, startTime=1733981980858; duration=0sec 2024-12-12T05:39:40,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:40,891 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:40,891 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:40,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:40,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:40,891 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): 
Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:39:40,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:40,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:40,894 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:39:40,894 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:40,894 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:40,894 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/17c97b56a81541cdb66dd5f7db4f7743, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bca5efa7719d47f4b65e0db2a0b8d209, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/8f82271716384adeb15abe0f276a8d75, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9296bb8f169749a3ae1fd2e46c345f1b] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=48.3 K 2024-12-12T05:39:40,895 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 17c97b56a81541cdb66dd5f7db4f7743, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733981974397 2024-12-12T05:39:40,895 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting bca5efa7719d47f4b65e0db2a0b8d209, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733981975028 2024-12-12T05:39:40,895 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f82271716384adeb15abe0f276a8d75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733981976432 2024-12-12T05:39:40,895 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 9296bb8f169749a3ae1fd2e46c345f1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579 2024-12-12T05:39:40,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742044_1220 (size=31937) 2024-12-12T05:39:40,921 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#182 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:40,922 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/0e8b9a331a594888aa45a3d70e99cde7 is 50, key is test_row_0/C:col10/1733981977579/Put/seqid=0 2024-12-12T05:39:40,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c7ceceb43b064554acf1a92aae86ff1f_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981979168/Put/seqid=0 2024-12-12T05:39:40,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742046_1222 (size=12454) 2024-12-12T05:39:40,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742045_1221 (size=12983) 2024-12-12T05:39:41,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:41,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:41,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:41,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982041310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982041311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982041311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,322 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/d9c5dd04de0a49799872e1848f0f930e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/d9c5dd04de0a49799872e1848f0f930e 2024-12-12T05:39:41,326 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into d9c5dd04de0a49799872e1848f0f930e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:41,326 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:41,326 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=12, startTime=1733981980858; duration=0sec 2024-12-12T05:39:41,326 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:41,327 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:41,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:41,337 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c7ceceb43b064554acf1a92aae86ff1f_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c7ceceb43b064554acf1a92aae86ff1f_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:41,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/ee3d43d238df420cb4a49eabcb1a4090, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:41,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/ee3d43d238df420cb4a49eabcb1a4090 is 175, key is test_row_0/A:col10/1733981979168/Put/seqid=0 2024-12-12T05:39:41,341 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/0e8b9a331a594888aa45a3d70e99cde7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0e8b9a331a594888aa45a3d70e99cde7 2024-12-12T05:39:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742047_1223 (size=31255) 2024-12-12T05:39:41,347 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 
92b0b352d91bdb0e121a8902637d8c5e into 0e8b9a331a594888aa45a3d70e99cde7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:41,347 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:41,347 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=12, startTime=1733981980858; duration=0sec 2024-12-12T05:39:41,347 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:41,347 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:41,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982041415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982041415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982041415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:41,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982041616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982041616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982041616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982041738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,740 DEBUG [Thread-674 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:39:41,744 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=304, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/ee3d43d238df420cb4a49eabcb1a4090 2024-12-12T05:39:41,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/7e51344d5dc44497b8b4c37e5bc88ccf is 50, key is test_row_0/B:col10/1733981979168/Put/seqid=0 2024-12-12T05:39:41,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742048_1224 (size=12301) 2024-12-12T05:39:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982041919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982041920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:41,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:41,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982041921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:42,156 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/7e51344d5dc44497b8b4c37e5bc88ccf 2024-12-12T05:39:42,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/fc70e03942e24bbba7ab550094c652a0 is 50, key is test_row_0/C:col10/1733981979168/Put/seqid=0 2024-12-12T05:39:42,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742049_1225 (size=12301) 2024-12-12T05:39:42,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982042422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:42,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:42,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982042424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:42,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982042426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:42,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:42,569 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/fc70e03942e24bbba7ab550094c652a0 2024-12-12T05:39:42,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/ee3d43d238df420cb4a49eabcb1a4090 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090 2024-12-12T05:39:42,577 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090, entries=150, sequenceid=304, filesize=30.5 K 2024-12-12T05:39:42,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/7e51344d5dc44497b8b4c37e5bc88ccf as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7e51344d5dc44497b8b4c37e5bc88ccf 2024-12-12T05:39:42,582 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7e51344d5dc44497b8b4c37e5bc88ccf, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T05:39:42,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/fc70e03942e24bbba7ab550094c652a0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fc70e03942e24bbba7ab550094c652a0 2024-12-12T05:39:42,588 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fc70e03942e24bbba7ab550094c652a0, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T05:39:42,589 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 92b0b352d91bdb0e121a8902637d8c5e in 1697ms, sequenceid=304, compaction requested=false 2024-12-12T05:39:42,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:42,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
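The burst of RegionTooBusyException warnings above is the region server's write throttle: HRegion.checkResources rejects Mutate calls once the region's memstore crosses its blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, 512.0 K in this run), and the advancing callIds and deadlines show the writers backing off and retrying until the flush at sequenceid=304 drains the memstore. A minimal client-side sketch of such a write with an explicit retry, assuming the table and row names seen in the log and default connection settings (the stock HBase client normally performs comparable retries internally), could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry with a growing pause when the region reports it is over its
      // memstore blocking limit and is waiting for a flush to complete.
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}

The 512.0 K limit itself is configuration-derived, so a test run like this one presumably shrinks the memstore flush size to make the blocking path easy to hit.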
2024-12-12T05:39:42,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-12T05:39:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-12T05:39:42,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-12T05:39:42,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1560 sec 2024-12-12T05:39:42,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.1590 sec 2024-12-12T05:39:43,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:43,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:39:43,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:43,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:43,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:43,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:43,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:43,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:43,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121297aeddb2ff1c4bbcb7ec854d9cbb74f6_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:43,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982043439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982043439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982043439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742050_1226 (size=12454) 2024-12-12T05:39:43,450 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:43,453 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121297aeddb2ff1c4bbcb7ec854d9cbb74f6_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121297aeddb2ff1c4bbcb7ec854d9cbb74f6_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:43,454 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/f29ece4b122442f394433ccf105db4fb, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:43,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/f29ece4b122442f394433ccf105db4fb is 175, key is test_row_0/A:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:43,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742051_1227 (size=31255) 2024-12-12T05:39:43,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982043541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982043541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982043542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982043743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982043744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:43,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982043746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:43,858 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/f29ece4b122442f394433ccf105db4fb 2024-12-12T05:39:43,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/61dc4b879709410aa1220748b00c8007 is 50, key is test_row_0/B:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:43,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742052_1228 (size=12301) 2024-12-12T05:39:44,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:44,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:44,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982044046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982044047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:44,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982044047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/61dc4b879709410aa1220748b00c8007 2024-12-12T05:39:44,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a1b81c600ffc4f44b5be29ccbf8e8327 is 50, key is test_row_0/C:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:44,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742053_1229 (size=12301) 2024-12-12T05:39:44,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T05:39:44,538 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-12T05:39:44,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:44,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-12T05:39:44,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T05:39:44,540 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:44,540 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:44,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:44,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:44,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982044550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:44,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982044551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:44,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982044553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T05:39:44,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a1b81c600ffc4f44b5be29ccbf8e8327 2024-12-12T05:39:44,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T05:39:44,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:44,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:44,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:44,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:44,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:44,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:44,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/f29ece4b122442f394433ccf105db4fb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb 2024-12-12T05:39:44,720 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb, entries=150, sequenceid=331, filesize=30.5 K 2024-12-12T05:39:44,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/61dc4b879709410aa1220748b00c8007 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/61dc4b879709410aa1220748b00c8007 2024-12-12T05:39:44,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/61dc4b879709410aa1220748b00c8007, entries=150, sequenceid=331, filesize=12.0 K 2024-12-12T05:39:44,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/a1b81c600ffc4f44b5be29ccbf8e8327 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a1b81c600ffc4f44b5be29ccbf8e8327 2024-12-12T05:39:44,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a1b81c600ffc4f44b5be29ccbf8e8327, entries=150, sequenceid=331, filesize=12.0 K 2024-12-12T05:39:44,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 92b0b352d91bdb0e121a8902637d8c5e in 1301ms, sequenceid=331, compaction requested=true 2024-12-12T05:39:44,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:44,731 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:39:44,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:44,731 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:44,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:44,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:44,731 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:44,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:44,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:44,732 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:44,732 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
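Once the flush at sequenceid=331 commits its three store files, MemStoreFlusher marks stores A, B and C for compaction and ExploringCompactionPolicy selects all three eligible files per store for a minor compaction. The flush that the master drives here through FlushTableProcedure (pid=59, then pid=61) can also be requested from the public Admin API; a brief sketch, assuming only the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask the master to flush every region of the table; this is the
      // operation logged above as FlushTableProcedure/FlushRegionProcedure.
      admin.flush(table);
      // Request a compaction; the region server's compaction policy decides
      // which store files are actually selected, as seen in the log above.
      admin.compact(table);
    }
  }
}

Note that when a flush is already in progress the region answers "NOT flushing ... as already flushing" and the flush procedure fails and is retried, which is exactly what pid=62 goes through above.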
2024-12-12T05:39:44,732 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/d9c5dd04de0a49799872e1848f0f930e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=92.2 K 2024-12-12T05:39:44,732 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/91cdcc6fc2b245adbbab47d8efb1b442, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7e51344d5dc44497b8b4c37e5bc88ccf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/61dc4b879709410aa1220748b00c8007] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=36.7 K 2024-12-12T05:39:44,732 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/d9c5dd04de0a49799872e1848f0f930e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb] 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 91cdcc6fc2b245adbbab47d8efb1b442, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579 2024-12-12T05:39:44,732 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9c5dd04de0a49799872e1848f0f930e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579 2024-12-12T05:39:44,733 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e51344d5dc44497b8b4c37e5bc88ccf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733981979161 2024-12-12T05:39:44,733 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee3d43d238df420cb4a49eabcb1a4090, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733981979161 2024-12-12T05:39:44,733 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f29ece4b122442f394433ccf105db4fb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307 2024-12-12T05:39:44,733 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 61dc4b879709410aa1220748b00c8007, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307 2024-12-12T05:39:44,739 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:44,740 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:44,741 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/c369126e42974c669d82a6442cbb9aca is 50, key is test_row_0/B:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:44,741 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412125a270e2e8a554bf5a9de2b2c3b94c35f_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:44,743 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412125a270e2e8a554bf5a9de2b2c3b94c35f_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:44,743 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125a270e2e8a554bf5a9de2b2c3b94c35f_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:44,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742054_1230 (size=13085) 2024-12-12T05:39:44,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742055_1231 (size=4469) 2024-12-12T05:39:44,749 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#190 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:44,750 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/51765d08e95741a59b5793d60ef6eda0 is 175, key is test_row_0/A:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:44,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742056_1232 (size=32039) 2024-12-12T05:39:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T05:39:44,865 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:44,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T05:39:44,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:44,865 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:39:44,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:44,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:44,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:44,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:44,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:44,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:44,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b6cccb37509f403aa0f7b7668a5f1a1c_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981983438/Put/seqid=0 2024-12-12T05:39:44,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742057_1233 
(size=12454) 2024-12-12T05:39:45,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T05:39:45,151 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/c369126e42974c669d82a6442cbb9aca as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c369126e42974c669d82a6442cbb9aca 2024-12-12T05:39:45,156 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into c369126e42974c669d82a6442cbb9aca(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:45,156 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:45,156 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=13, startTime=1733981984731; duration=0sec 2024-12-12T05:39:45,156 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:45,156 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:45,156 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:45,157 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:45,158 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:45,158 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
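Note on the PressureAwareThroughputController lines above: for each finished compaction they report the average throughput, how many times the writer slept, how many compactions are still active, and the shared 50.00 MB/second budget. The class below is a deliberately simplified, hypothetical stand-in for that idea (it is not the HBase implementation): a writer reports bytes as it goes and is only put to sleep when it gets ahead of the configured byte-per-second budget, which is why the tiny compactions in this run log "slept 0 time(s)".

// Simplified illustration of a pressure-style throughput limiter; NOT the HBase class.
public class SimpleThroughputLimiter {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesSoFar = 0;

    public SimpleThroughputLimiter(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing a chunk; sleeps just long enough to stay under the budget. */
    public void control(long bytesWritten) throws InterruptedException {
        bytesSoFar += bytesWritten;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesSoFar / maxBytesPerSecond;
        long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs); // the ~37 K compactions above finish well under budget, so this never triggers for them
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s budget, matching the "total limit is 50.00 MB/second" reported in the log.
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50.0 * 1024 * 1024);
        limiter.control(37_585); // roughly the size of the B-store compaction inputs
        System.out.println("done without sleeping");
    }
}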
2024-12-12T05:39:45,158 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0e8b9a331a594888aa45a3d70e99cde7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fc70e03942e24bbba7ab550094c652a0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a1b81c600ffc4f44b5be29ccbf8e8327] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=36.7 K 2024-12-12T05:39:45,158 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e8b9a331a594888aa45a3d70e99cde7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579 2024-12-12T05:39:45,159 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting fc70e03942e24bbba7ab550094c652a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733981979161 2024-12-12T05:39:45,159 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a1b81c600ffc4f44b5be29ccbf8e8327, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307 2024-12-12T05:39:45,162 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/51765d08e95741a59b5793d60ef6eda0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/51765d08e95741a59b5793d60ef6eda0 2024-12-12T05:39:45,166 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into 51765d08e95741a59b5793d60ef6eda0(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
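Each store's inputs are fully described by the per-file Compactor(224) DEBUG lines, so a compaction can be cross-checked straight from the log. Below is a small, self-contained Java sketch (a hypothetical helper, not part of HBase) that parses lines in exactly the format shown above; the three sample strings are copied from the C-store selection, and the totals it prints (3 files, 450 keys, 36.7 K, earliestPutTs=1733981977579) match the totalSize reported when that compaction started.

import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Summarizes "Compacting <file>, keycount=.., bloomtype=.., size=.. K, ..." lines
// exactly as they appear in this excerpt (sizes are printed in KiB here).
public class CompactionInputSummary {
    private static final Pattern LINE = Pattern.compile(
        "Compacting (\\S+), keycount=(\\d+), bloomtype=\\w+, size=([\\d.]+) K, .*seqNum=(\\d+), earliestPutTs=(\\d+)");

    public static void main(String[] args) {
        // Sample lines copied from the C-store compaction above.
        List<String> lines = List.of(
            "Compacting 0e8b9a331a594888aa45a3d70e99cde7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733981977579",
            "Compacting fc70e03942e24bbba7ab550094c652a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733981979161",
            "Compacting a1b81c600ffc4f44b5be29ccbf8e8327, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307");

        long keys = 0;
        double totalKib = 0;
        long earliestPutTs = Long.MAX_VALUE;
        for (String line : lines) {
            Matcher m = LINE.matcher(line);
            if (!m.find()) continue; // skip anything that is not a Compactor(224) line
            keys += Long.parseLong(m.group(2));
            totalKib += Double.parseDouble(m.group(3));
            earliestPutTs = Math.min(earliestPutTs, Long.parseLong(m.group(5)));
        }
        // Prints: files=3 keys=450 totalSize=36.7 K earliestPutTs=1733981977579
        System.out.printf("files=%d keys=%d totalSize=%.1f K earliestPutTs=%d%n",
            lines.size(), keys, totalKib, earliestPutTs);
    }
}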
2024-12-12T05:39:45,166 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:45,166 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=13, startTime=1733981984731; duration=0sec 2024-12-12T05:39:45,167 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:45,167 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:45,168 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:45,168 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/3adc7ef63dab401e8bf8586a5f90b8c1 is 50, key is test_row_0/C:col10/1733981983428/Put/seqid=0 2024-12-12T05:39:45,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742058_1234 (size=13085) 2024-12-12T05:39:45,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:45,282 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b6cccb37509f403aa0f7b7668a5f1a1c_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b6cccb37509f403aa0f7b7668a5f1a1c_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:45,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/90e8ed33b5554531a79eccdeff54aba9, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:45,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/90e8ed33b5554531a79eccdeff54aba9 is 175, key is 
test_row_0/A:col10/1733981983438/Put/seqid=0 2024-12-12T05:39:45,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742059_1235 (size=31255) 2024-12-12T05:39:45,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:45,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:45,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982045576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982045577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982045579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,587 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/3adc7ef63dab401e8bf8586a5f90b8c1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/3adc7ef63dab401e8bf8586a5f90b8c1 2024-12-12T05:39:45,592 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into 3adc7ef63dab401e8bf8586a5f90b8c1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:45,592 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:45,592 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=13, startTime=1733981984731; duration=0sec 2024-12-12T05:39:45,592 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:45,592 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:45,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T05:39:45,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982045680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982045680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982045682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,688 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/90e8ed33b5554531a79eccdeff54aba9 2024-12-12T05:39:45,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/483c9d779d0b4985b426a68ace22dfe0 is 50, key is test_row_0/B:col10/1733981983438/Put/seqid=0 2024-12-12T05:39:45,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742060_1236 (size=12301) 2024-12-12T05:39:45,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45340 deadline: 1733982045768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,770 DEBUG [Thread-674 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:39:45,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982045881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982045882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:45,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982045885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,099 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/483c9d779d0b4985b426a68ace22dfe0 2024-12-12T05:39:46,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/308f3325b9714b96b8c8ffe510c6c517 is 50, key is test_row_0/C:col10/1733981983438/Put/seqid=0 2024-12-12T05:39:46,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742061_1237 (size=12301) 2024-12-12T05:39:46,128 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/308f3325b9714b96b8c8ffe510c6c517 2024-12-12T05:39:46,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/90e8ed33b5554531a79eccdeff54aba9 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9 2024-12-12T05:39:46,136 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9, entries=150, sequenceid=342, filesize=30.5 K 2024-12-12T05:39:46,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/483c9d779d0b4985b426a68ace22dfe0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/483c9d779d0b4985b426a68ace22dfe0 2024-12-12T05:39:46,139 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/483c9d779d0b4985b426a68ace22dfe0, entries=150, sequenceid=342, filesize=12.0 K 2024-12-12T05:39:46,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/308f3325b9714b96b8c8ffe510c6c517 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/308f3325b9714b96b8c8ffe510c6c517 2024-12-12T05:39:46,144 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/308f3325b9714b96b8c8ffe510c6c517, entries=150, sequenceid=342, filesize=12.0 K 2024-12-12T05:39:46,145 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 92b0b352d91bdb0e121a8902637d8c5e in 1279ms, sequenceid=342, compaction requested=false 2024-12-12T05:39:46,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:46,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
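The burst of RegionTooBusyException warnings above ("Over memstore limit=512.0 K") is the region server refusing new mutations while the flush for pid=62 catches up: once a region's memstore exceeds its blocking limit, puts are rejected and the client's RpcRetryingCallerImpl backs off and retries, which is the pattern visible in the Thread-674 trace (tries=7, retries=16). In stock HBase the blocking limit is the flush size multiplied by a block multiplier; the property names in the sketch below are the standard ones, but the 512.0 K figure seen here comes from whatever this test harness configured, so treat the snippet purely as an illustration of where the limit comes from, not as the settings used in this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: shows how the memstore blocking limit is derived from configuration.
public class MemstoreBlockingLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults shown here are the stock fallbacks; this test run clearly used much smaller values.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block once a region's memstore exceeds "
            + (flushSize * multiplier) + " bytes");
    }
}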
2024-12-12T05:39:46,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-12T05:39:46,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-12T05:39:46,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-12T05:39:46,148 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6050 sec 2024-12-12T05:39:46,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.6090 sec 2024-12-12T05:39:46,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:46,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T05:39:46,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:46,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:46,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:46,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:46,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:46,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:46,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212eda36a7a59b847fb976dd5002aaa5987_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:46,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982046191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982046192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982046192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742062_1238 (size=14994) 2024-12-12T05:39:46,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982046294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982046294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982046294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982046496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982046497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982046497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,601 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:46,604 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212eda36a7a59b847fb976dd5002aaa5987_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212eda36a7a59b847fb976dd5002aaa5987_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:46,605 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/5beef804ea794bd8ad2e89de2f98121a, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:46,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/5beef804ea794bd8ad2e89de2f98121a is 175, key is test_row_0/A:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742063_1239 (size=39949) 2024-12-12T05:39:46,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45324 deadline: 1733982046637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,638 DEBUG [Thread-678 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18271 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:39:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T05:39:46,644 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-12T05:39:46,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:39:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-12T05:39:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:46,646 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:39:46,646 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:39:46,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:39:46,689 DEBUG [Thread-683 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:60303 2024-12-12T05:39:46,689 DEBUG [Thread-683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:46,690 DEBUG [Thread-681 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:60303 2024-12-12T05:39:46,690 DEBUG [Thread-681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:46,691 DEBUG [Thread-685 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:60303 2024-12-12T05:39:46,691 DEBUG [Thread-685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:46,694 DEBUG [Thread-687 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c1ec7ee to 127.0.0.1:60303 2024-12-12T05:39:46,694 DEBUG [Thread-687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:46,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:46,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982046798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:46,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:46,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:46,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:46,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:46,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982046799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:46,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:46,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982046801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:46,952 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:46,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:46,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:46,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:46,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:46,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:46,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:46,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:46,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T05:39:47,011 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=371, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/5beef804ea794bd8ad2e89de2f98121a 2024-12-12T05:39:47,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/46c27fb4443645c6bfa623dcea662ebd is 50, key is test_row_0/B:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:47,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742064_1240 (size=12301) 2024-12-12T05:39:47,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:47,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:47,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:47,263 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:47,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:47,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:47,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:47,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45304 deadline: 1733982047300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:47,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:39:47,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45364 deadline: 1733982047306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45382 deadline: 1733982047306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,420 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:47,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:47,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:47,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:47,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:47,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/46c27fb4443645c6bfa623dcea662ebd 2024-12-12T05:39:47,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/c20df96eb4a446cb9d4b6d4825d74691 is 50, key is test_row_0/C:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:47,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742065_1241 (size=12301) 2024-12-12T05:39:47,577 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:47,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:47,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:47,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,733 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:47,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:47,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:39:47,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:39:47,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:47,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/c20df96eb4a446cb9d4b6d4825d74691 2024-12-12T05:39:47,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/5beef804ea794bd8ad2e89de2f98121a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a 2024-12-12T05:39:47,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a, entries=200, sequenceid=371, filesize=39.0 K 2024-12-12T05:39:47,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/46c27fb4443645c6bfa623dcea662ebd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/46c27fb4443645c6bfa623dcea662ebd 2024-12-12T05:39:47,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/46c27fb4443645c6bfa623dcea662ebd, entries=150, sequenceid=371, filesize=12.0 K 2024-12-12T05:39:47,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/c20df96eb4a446cb9d4b6d4825d74691 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c20df96eb4a446cb9d4b6d4825d74691 2024-12-12T05:39:47,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c20df96eb4a446cb9d4b6d4825d74691, entries=150, sequenceid=371, filesize=12.0 K 2024-12-12T05:39:47,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 92b0b352d91bdb0e121a8902637d8c5e in 1691ms, sequenceid=371, compaction requested=true 2024-12-12T05:39:47,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:A, priority=-2147483648, current under compaction store size 
is 1 2024-12-12T05:39:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:47,877 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:39:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:47,877 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92b0b352d91bdb0e121a8902637d8c5e:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:39:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:47,878 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:47,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:47,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/A is initiating minor compaction (all files) 2024-12-12T05:39:47,878 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/B is initiating minor compaction (all files) 2024-12-12T05:39:47,878 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/A in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,878 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/B in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:47,878 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c369126e42974c669d82a6442cbb9aca, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/483c9d779d0b4985b426a68ace22dfe0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/46c27fb4443645c6bfa623dcea662ebd] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=36.8 K 2024-12-12T05:39:47,878 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/51765d08e95741a59b5793d60ef6eda0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=100.8 K 2024-12-12T05:39:47,878 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/51765d08e95741a59b5793d60ef6eda0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a] 2024-12-12T05:39:47,878 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c369126e42974c669d82a6442cbb9aca, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307 2024-12-12T05:39:47,879 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51765d08e95741a59b5793d60ef6eda0, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307 2024-12-12T05:39:47,879 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 483c9d779d0b4985b426a68ace22dfe0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733981983435 2024-12-12T05:39:47,879 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90e8ed33b5554531a79eccdeff54aba9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733981983435 2024-12-12T05:39:47,879 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 46c27fb4443645c6bfa623dcea662ebd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733981985576 2024-12-12T05:39:47,879 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5beef804ea794bd8ad2e89de2f98121a, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733981985572 2024-12-12T05:39:47,886 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:47,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:47,887 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#B#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:47,887 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:47,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:47,887 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/97ce618d596847b591bffde816aee60a is 50, key is test_row_0/B:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:47,888 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:47,890 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121243bbb77c607b4bf78fc85618c940dd8f_92b0b352d91bdb0e121a8902637d8c5e store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:47,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742066_1242 (size=13187) 2024-12-12T05:39:47,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121235f8567a0cff4aa6897c7e23d72a552f_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981986191/Put/seqid=0 2024-12-12T05:39:47,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742067_1243 (size=12454) 2024-12-12T05:39:47,908 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true 
file=d41d8cd98f00b204e9800998ecf8427e2024121243bbb77c607b4bf78fc85618c940dd8f_92b0b352d91bdb0e121a8902637d8c5e, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:47,908 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121243bbb77c607b4bf78fc85618c940dd8f_92b0b352d91bdb0e121a8902637d8c5e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:47,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742068_1244 (size=4469) 2024-12-12T05:39:48,304 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/97ce618d596847b591bffde816aee60a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/97ce618d596847b591bffde816aee60a 2024-12-12T05:39:48,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:48,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. as already flushing 2024-12-12T05:39:48,305 DEBUG [Thread-672 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e3a4420 to 127.0.0.1:60303 2024-12-12T05:39:48,305 DEBUG [Thread-672 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:48,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:48,309 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121235f8567a0cff4aa6897c7e23d72a552f_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121235f8567a0cff4aa6897c7e23d72a552f_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:48,310 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/B of 92b0b352d91bdb0e121a8902637d8c5e into 97ce618d596847b591bffde816aee60a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:48,310 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:48,310 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/B, priority=13, startTime=1733981987877; duration=0sec 2024-12-12T05:39:48,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/e4231d1424a04c4db1f25095a73b17ee, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:48,310 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:39:48,310 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:B 2024-12-12T05:39:48,310 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:39:48,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/e4231d1424a04c4db1f25095a73b17ee is 175, key is test_row_0/A:col10/1733981986191/Put/seqid=0 2024-12-12T05:39:48,312 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:39:48,312 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 92b0b352d91bdb0e121a8902637d8c5e/C is initiating minor compaction (all files) 2024-12-12T05:39:48,312 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92b0b352d91bdb0e121a8902637d8c5e/C in TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:48,312 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/3adc7ef63dab401e8bf8586a5f90b8c1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/308f3325b9714b96b8c8ffe510c6c517, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c20df96eb4a446cb9d4b6d4825d74691] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp, totalSize=36.8 K 2024-12-12T05:39:48,312 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3adc7ef63dab401e8bf8586a5f90b8c1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733981981307 2024-12-12T05:39:48,313 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 308f3325b9714b96b8c8ffe510c6c517, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733981983435 2024-12-12T05:39:48,313 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c20df96eb4a446cb9d4b6d4825d74691, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733981985576 2024-12-12T05:39:48,314 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#A#compaction#199 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:48,315 DEBUG [Thread-676 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:60303 2024-12-12T05:39:48,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/1593f1fb25654843afa9b98c1cc4818f is 175, key is test_row_0/A:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:48,315 DEBUG [Thread-676 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:48,316 DEBUG [Thread-670 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:60303 2024-12-12T05:39:48,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742069_1245 (size=31255) 2024-12-12T05:39:48,316 DEBUG [Thread-670 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:48,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742070_1246 (size=32141) 2024-12-12T05:39:48,321 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92b0b352d91bdb0e121a8902637d8c5e#C#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:39:48,322 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bce152485428427989f4a18ce8abe020 is 50, key is test_row_0/C:col10/1733981986184/Put/seqid=0 2024-12-12T05:39:48,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742071_1247 (size=13187) 2024-12-12T05:39:48,716 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=381, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/e4231d1424a04c4db1f25095a73b17ee 2024-12-12T05:39:48,723 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/1593f1fb25654843afa9b98c1cc4818f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/1593f1fb25654843afa9b98c1cc4818f 2024-12-12T05:39:48,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/618fc6a0be6e463784093895f09cb2ac is 50, key is test_row_0/B:col10/1733981986191/Put/seqid=0 2024-12-12T05:39:48,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742072_1248 (size=12301) 2024-12-12T05:39:48,728 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/A of 92b0b352d91bdb0e121a8902637d8c5e into 1593f1fb25654843afa9b98c1cc4818f(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:39:48,728 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:48,728 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/A, priority=13, startTime=1733981987876; duration=0sec 2024-12-12T05:39:48,728 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:48,728 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:A 2024-12-12T05:39:48,730 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/bce152485428427989f4a18ce8abe020 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bce152485428427989f4a18ce8abe020 2024-12-12T05:39:48,734 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92b0b352d91bdb0e121a8902637d8c5e/C of 92b0b352d91bdb0e121a8902637d8c5e into bce152485428427989f4a18ce8abe020(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:39:48,734 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:48,734 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e., storeName=92b0b352d91bdb0e121a8902637d8c5e/C, priority=13, startTime=1733981987877; duration=0sec 2024-12-12T05:39:48,734 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:39:48,734 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92b0b352d91bdb0e121a8902637d8c5e:C 2024-12-12T05:39:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:49,129 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/618fc6a0be6e463784093895f09cb2ac 2024-12-12T05:39:49,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/07195be185354406a09df55ebcdd3008 is 50, key is test_row_0/C:col10/1733981986191/Put/seqid=0 2024-12-12T05:39:49,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742073_1249 (size=12301) 2024-12-12T05:39:49,553 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/07195be185354406a09df55ebcdd3008 2024-12-12T05:39:49,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/e4231d1424a04c4db1f25095a73b17ee as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/e4231d1424a04c4db1f25095a73b17ee 2024-12-12T05:39:49,569 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/e4231d1424a04c4db1f25095a73b17ee, entries=150, sequenceid=381, filesize=30.5 K 2024-12-12T05:39:49,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/618fc6a0be6e463784093895f09cb2ac as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/618fc6a0be6e463784093895f09cb2ac 2024-12-12T05:39:49,576 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/618fc6a0be6e463784093895f09cb2ac, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T05:39:49,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/07195be185354406a09df55ebcdd3008 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/07195be185354406a09df55ebcdd3008 2024-12-12T05:39:49,583 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/07195be185354406a09df55ebcdd3008, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T05:39:49,584 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=20.13 KB/20610 for 92b0b352d91bdb0e121a8902637d8c5e in 1696ms, sequenceid=381, compaction requested=false 2024-12-12T05:39:49,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:49,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:49,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-12T05:39:49,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-12T05:39:49,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-12T05:39:49,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9390 sec 2024-12-12T05:39:49,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 2.9420 sec 2024-12-12T05:39:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T05:39:50,755 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-12T05:39:53,916 DEBUG [master/83e80bf221ca:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region b675848e5b5abf83ab0aa0c34e08f9b3 changed from -1.0 to 0.0, refreshing cache 2024-12-12T05:39:55,801 DEBUG [Thread-674 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:60303 2024-12-12T05:39:55,801 DEBUG [Thread-674 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:56,677 DEBUG [Thread-678 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:60303 2024-12-12T05:39:56,677 DEBUG [Thread-678 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8222 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7691 2024-12-12T05:39:56,678 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T05:39:56,679 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3392 2024-12-12T05:39:56,679 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10173 rows 2024-12-12T05:39:56,679 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3390 2024-12-12T05:39:56,679 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10170 rows 2024-12-12T05:39:56,679 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:39:56,679 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26401a5f to 127.0.0.1:60303 2024-12-12T05:39:56,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:39:56,684 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T05:39:56,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T05:39:56,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:56,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T05:39:56,688 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981996688"}]},"ts":"1733981996688"} 2024-12-12T05:39:56,689 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T05:39:56,736 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T05:39:56,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:39:56,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, UNASSIGN}] 2024-12-12T05:39:56,742 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, UNASSIGN 2024-12-12T05:39:56,743 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:56,745 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:39:56,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; CloseRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:39:56,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T05:39:56,898 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:39:56,899 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(124): Close 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:56,899 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:39:56,899 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1681): Closing 92b0b352d91bdb0e121a8902637d8c5e, disabling compactions & flushes 2024-12-12T05:39:56,900 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:56,900 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 2024-12-12T05:39:56,900 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. after waiting 0 ms 2024-12-12T05:39:56,900 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
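The procedure chain recorded above (DisableTableProcedure -> CloseTableRegionsProcedure -> TransitRegionStateProcedure -> CloseRegionProcedure) is driven by the client-side disable request logged a few records earlier ("Client=jenkins//172.17.0.2 disable TestAcidGuarantees"), just as the earlier FLUSH procedure (pid=63) was driven by a client flush request. A minimal sketch of issuing those two requests through the standard HBase 2.x client Admin API is shown below; it is not taken from the test tool itself, and the ZooKeeper quorum setting is a placeholder rather than the mini-cluster address used in this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndDisableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder quorum; the log above comes from an in-process mini-cluster instead.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Corresponds to the FlushTableProcedure (pid=63) completed earlier in the log.
            admin.flush(table);
            // Corresponds to the DisableTableProcedure (pid=65) and its region-close
            // subprocedures being executed in the records above.
            admin.disableTable(table);
        }
    }
}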
2024-12-12T05:39:56,900 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(2837): Flushing 92b0b352d91bdb0e121a8902637d8c5e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T05:39:56,900 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=A 2024-12-12T05:39:56,901 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:56,901 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=B 2024-12-12T05:39:56,901 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:56,901 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92b0b352d91bdb0e121a8902637d8c5e, store=C 2024-12-12T05:39:56,901 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:39:56,912 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124572fdd38232445895f628be06ab19ab_92b0b352d91bdb0e121a8902637d8c5e is 50, key is test_row_0/A:col10/1733981995798/Put/seqid=0 2024-12-12T05:39:56,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742074_1250 (size=12454) 2024-12-12T05:39:56,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T05:39:57,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T05:39:57,318 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:57,328 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124572fdd38232445895f628be06ab19ab_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124572fdd38232445895f628be06ab19ab_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:57,329 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0973b5413c4c440cb65ea01247c3d05b, store: [table=TestAcidGuarantees family=A region=92b0b352d91bdb0e121a8902637d8c5e] 2024-12-12T05:39:57,331 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0973b5413c4c440cb65ea01247c3d05b is 175, key is test_row_0/A:col10/1733981995798/Put/seqid=0 2024-12-12T05:39:57,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742075_1251 (size=31255) 2024-12-12T05:39:57,735 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=392, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0973b5413c4c440cb65ea01247c3d05b 2024-12-12T05:39:57,746 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/f1a4f701e2444e17b0e2bd21b6c5f499 is 50, key is test_row_0/B:col10/1733981995798/Put/seqid=0 2024-12-12T05:39:57,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742076_1252 (size=12301) 2024-12-12T05:39:57,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T05:39:58,153 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/f1a4f701e2444e17b0e2bd21b6c5f499 2024-12-12T05:39:58,165 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/0dbe826aac214f36adee67f8968aae0a is 50, key is test_row_0/C:col10/1733981995798/Put/seqid=0 2024-12-12T05:39:58,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742077_1253 (size=12301) 2024-12-12T05:39:58,572 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/0dbe826aac214f36adee67f8968aae0a 2024-12-12T05:39:58,581 DEBUG 
[RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/A/0973b5413c4c440cb65ea01247c3d05b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0973b5413c4c440cb65ea01247c3d05b 2024-12-12T05:39:58,586 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0973b5413c4c440cb65ea01247c3d05b, entries=150, sequenceid=392, filesize=30.5 K 2024-12-12T05:39:58,587 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/B/f1a4f701e2444e17b0e2bd21b6c5f499 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f1a4f701e2444e17b0e2bd21b6c5f499 2024-12-12T05:39:58,593 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f1a4f701e2444e17b0e2bd21b6c5f499, entries=150, sequenceid=392, filesize=12.0 K 2024-12-12T05:39:58,594 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/.tmp/C/0dbe826aac214f36adee67f8968aae0a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0dbe826aac214f36adee67f8968aae0a 2024-12-12T05:39:58,600 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0dbe826aac214f36adee67f8968aae0a, entries=150, sequenceid=392, filesize=12.0 K 2024-12-12T05:39:58,601 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 92b0b352d91bdb0e121a8902637d8c5e in 1701ms, sequenceid=392, compaction requested=true 2024-12-12T05:39:58,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/83da5e0af8e94600a9c358139e0721c2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/9a0eb16fbd0a4fbc87e6f8d30e22151f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/65993ee67b2040b39a74f9ecb203aae8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/29b67554d4d04fe0bec43e64b1209a51, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/db3b66191e22418db4a0fba303675982, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/d9c5dd04de0a49799872e1848f0f930e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/51765d08e95741a59b5793d60ef6eda0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a] to archive 2024-12-12T05:39:58,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:39:58,606 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/124ed0f262ac4a548314be56609e0f00 2024-12-12T05:39:58,606 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/83da5e0af8e94600a9c358139e0721c2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/83da5e0af8e94600a9c358139e0721c2 2024-12-12T05:39:58,606 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/cea44cd5e9b54454aaa81b5b732b92d1 2024-12-12T05:39:58,606 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/531efc3bad5248ca860b8c527b554b1e 2024-12-12T05:39:58,607 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/df7de6f3b15942f4b2cfa8984eb76b15 2024-12-12T05:39:58,607 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/a2460f1c746c4358b4c32a18f22570a9 2024-12-12T05:39:58,608 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/9a0eb16fbd0a4fbc87e6f8d30e22151f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/9a0eb16fbd0a4fbc87e6f8d30e22151f 2024-12-12T05:39:58,608 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/22a7621d750a465482c3441f566ceb5a 2024-12-12T05:39:58,609 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/65993ee67b2040b39a74f9ecb203aae8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/65993ee67b2040b39a74f9ecb203aae8 2024-12-12T05:39:58,609 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/52230461c55a43e1ae00479c36cf8ba8 2024-12-12T05:39:58,609 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0857c13656d84536a389deb8cdc03f5a 2024-12-12T05:39:58,610 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/346e3938a33b496b88f6e34fbd41807b 2024-12-12T05:39:58,610 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/53e0906053944099ac77fe499eb46d08 2024-12-12T05:39:58,610 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/29b67554d4d04fe0bec43e64b1209a51 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/29b67554d4d04fe0bec43e64b1209a51 2024-12-12T05:39:58,610 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/fc02a13dbb0e4e37928f32159dd238f9 2024-12-12T05:39:58,611 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/33d8ef679c12427eb2095af284549348 2024-12-12T05:39:58,611 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/db3b66191e22418db4a0fba303675982 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/db3b66191e22418db4a0fba303675982 2024-12-12T05:39:58,611 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/3188d9498ee54189bc954b362fb48b03 2024-12-12T05:39:58,612 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/187c3e9cc61345dfbe74bca197c3b245 2024-12-12T05:39:58,612 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/936b559d09b04b86ba9332cb01a0c5b7 2024-12-12T05:39:58,612 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/51765d08e95741a59b5793d60ef6eda0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/51765d08e95741a59b5793d60ef6eda0 2024-12-12T05:39:58,612 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/d9c5dd04de0a49799872e1848f0f930e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/d9c5dd04de0a49799872e1848f0f930e 2024-12-12T05:39:58,613 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/ee3d43d238df420cb4a49eabcb1a4090 2024-12-12T05:39:58,613 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/f29ece4b122442f394433ccf105db4fb 2024-12-12T05:39:58,613 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/90e8ed33b5554531a79eccdeff54aba9 2024-12-12T05:39:58,613 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/5beef804ea794bd8ad2e89de2f98121a 2024-12-12T05:39:58,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7040dd6a2d484f4ebda3feea4505f0fd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0088bff5b2d74fb3873e99c2fdd44fd5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6764f6cb186f4ea9b2543fe8f0a82381, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/88c3e5bac7c741f4852ba866bb799405, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/dee646a6e8f74519863e49cad6ff3509, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/376122f885f74165ae71a83ebc05739a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0aae8439b507428fa612da89ebfa0926, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/b06b16ade0d64d90ab49d32eba2f1909, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c3cb0b69ccc142fcb27bae2e18fd5ecd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6ed0383cc09345c391cacedfcba2818d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/90cc914f62bb49bf97c3d739d5af0cbc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e54091c9b7124168aaa95321958ad346, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/a220144071cc4454a789131edd4ee8b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e97b8ef614d445318746450c7dc1ff5d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8c233e83d965402a821a0852f9ebd97f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8e78d6cc220b4ce3a3e5bf8230d46f82, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f735b93078cf4a629dcf5bd835c24389, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/62729d59b2804d43b581b0fdedcf2f6d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/91cdcc6fc2b245adbbab47d8efb1b442, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/86baa96a300e4084ab462ce48caf5883, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7e51344d5dc44497b8b4c37e5bc88ccf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c369126e42974c669d82a6442cbb9aca, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/61dc4b879709410aa1220748b00c8007, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/483c9d779d0b4985b426a68ace22dfe0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/46c27fb4443645c6bfa623dcea662ebd] to archive 2024-12-12T05:39:58,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
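Each "Archived from FileableStoreFile" record below moves a compacted HFile out of the region's data directory into the parallel archive tree, keeping the same table/region/column-family layout. The helper below is not HBase's HFileArchiver; it is only a small illustration, under the assumption that the path convention visible in these log lines (the data/... prefix rewritten to archive/data/...) holds for the default namespace shown here.

import org.apache.hadoop.fs.Path;

/** Illustration of the data -> archive path convention visible in the surrounding log records. */
public class ArchivePathExample {

    // Hypothetical helper, not part of HBase: rewrites ".../data/default/<table>/..."
    // to ".../archive/data/default/<table>/..." the way the archived entries above read.
    static Path toArchivePath(Path storeFile) {
        String s = storeFile.toString();
        int idx = s.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("not under a data/ directory: " + s);
        }
        return new Path(s.substring(0, idx) + "/archive" + s.substring(idx));
    }

    public static void main(String[] args) {
        Path src = new Path("hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/"
                + "data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa");
        // Prints the same archive location that HFileArchiver logged for this file.
        System.out.println(toArchivePath(src));
    }
}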
2024-12-12T05:39:58,617 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/983f90661ac4459d99a1c8ff46b842aa 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/376122f885f74165ae71a83ebc05739a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/376122f885f74165ae71a83ebc05739a 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/88c3e5bac7c741f4852ba866bb799405 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/88c3e5bac7c741f4852ba866bb799405 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0aae8439b507428fa612da89ebfa0926 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0aae8439b507428fa612da89ebfa0926 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7040dd6a2d484f4ebda3feea4505f0fd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7040dd6a2d484f4ebda3feea4505f0fd 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6764f6cb186f4ea9b2543fe8f0a82381 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6764f6cb186f4ea9b2543fe8f0a82381 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0088bff5b2d74fb3873e99c2fdd44fd5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/0088bff5b2d74fb3873e99c2fdd44fd5 2024-12-12T05:39:58,617 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/dee646a6e8f74519863e49cad6ff3509 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/dee646a6e8f74519863e49cad6ff3509 2024-12-12T05:39:58,619 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c3cb0b69ccc142fcb27bae2e18fd5ecd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c3cb0b69ccc142fcb27bae2e18fd5ecd 2024-12-12T05:39:58,619 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/90cc914f62bb49bf97c3d739d5af0cbc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/90cc914f62bb49bf97c3d739d5af0cbc 2024-12-12T05:39:58,619 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/b06b16ade0d64d90ab49d32eba2f1909 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/b06b16ade0d64d90ab49d32eba2f1909 2024-12-12T05:39:58,619 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6ed0383cc09345c391cacedfcba2818d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/6ed0383cc09345c391cacedfcba2818d 2024-12-12T05:39:58,619 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e54091c9b7124168aaa95321958ad346 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e54091c9b7124168aaa95321958ad346 2024-12-12T05:39:58,619 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/a220144071cc4454a789131edd4ee8b1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/a220144071cc4454a789131edd4ee8b1 2024-12-12T05:39:58,620 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e97b8ef614d445318746450c7dc1ff5d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/e97b8ef614d445318746450c7dc1ff5d 2024-12-12T05:39:58,620 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8c233e83d965402a821a0852f9ebd97f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8c233e83d965402a821a0852f9ebd97f 2024-12-12T05:39:58,620 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8e78d6cc220b4ce3a3e5bf8230d46f82 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/8e78d6cc220b4ce3a3e5bf8230d46f82 2024-12-12T05:39:58,621 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/62729d59b2804d43b581b0fdedcf2f6d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/62729d59b2804d43b581b0fdedcf2f6d 2024-12-12T05:39:58,621 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/86baa96a300e4084ab462ce48caf5883 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/86baa96a300e4084ab462ce48caf5883 2024-12-12T05:39:58,621 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f735b93078cf4a629dcf5bd835c24389 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f735b93078cf4a629dcf5bd835c24389 2024-12-12T05:39:58,621 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/91cdcc6fc2b245adbbab47d8efb1b442 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/91cdcc6fc2b245adbbab47d8efb1b442 2024-12-12T05:39:58,621 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7e51344d5dc44497b8b4c37e5bc88ccf to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/7e51344d5dc44497b8b4c37e5bc88ccf 2024-12-12T05:39:58,622 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/483c9d779d0b4985b426a68ace22dfe0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/483c9d779d0b4985b426a68ace22dfe0 2024-12-12T05:39:58,622 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c369126e42974c669d82a6442cbb9aca to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/c369126e42974c669d82a6442cbb9aca 2024-12-12T05:39:58,622 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/46c27fb4443645c6bfa623dcea662ebd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/46c27fb4443645c6bfa623dcea662ebd 2024-12-12T05:39:58,623 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/61dc4b879709410aa1220748b00c8007 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/61dc4b879709410aa1220748b00c8007 2024-12-12T05:39:58,628 DEBUG [StoreCloser-TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a0a97730903e4b26885f7f1342d1d289, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c05966d100854113bdaa4bf85fbf1dec, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/871888ecbdd44e19a903fb950c343efc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9d651d16eb8146319fd4d7b76cc925a5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2600738e975a41899b75711082c9f2f8, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/45de93ab564b4cf0b769194968f5a17d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/50066d24316f40ab9451bb3af916461f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bbfdd1a9334a43838ac54b083b6a3584, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fcbc7b81b42d47dfbb2186fd707dbec6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/6fbe1bb659f34696ac57c6b8240ae19a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a505ca3bb82a4c36b504e636b5fb8d05, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/02daf90390d343e993f24612f69ac4d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/1a2c2f419e934211a521b0015da8eadb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/5f8a12a01b07421f8048db32c07e0e54, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2a98d6cb9a3044ab8f8152f0d5af484d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/17c97b56a81541cdb66dd5f7db4f7743, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9659928f870f430dadb91d5cde9ce6df, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bca5efa7719d47f4b65e0db2a0b8d209, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/8f82271716384adeb15abe0f276a8d75, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0e8b9a331a594888aa45a3d70e99cde7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9296bb8f169749a3ae1fd2e46c345f1b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fc70e03942e24bbba7ab550094c652a0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/3adc7ef63dab401e8bf8586a5f90b8c1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a1b81c600ffc4f44b5be29ccbf8e8327, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/308f3325b9714b96b8c8ffe510c6c517, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c20df96eb4a446cb9d4b6d4825d74691] to archive 2024-12-12T05:39:58,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c05966d100854113bdaa4bf85fbf1dec to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c05966d100854113bdaa4bf85fbf1dec 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/50066d24316f40ab9451bb3af916461f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/50066d24316f40ab9451bb3af916461f 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a0a97730903e4b26885f7f1342d1d289 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a0a97730903e4b26885f7f1342d1d289 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9d651d16eb8146319fd4d7b76cc925a5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9d651d16eb8146319fd4d7b76cc925a5 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/871888ecbdd44e19a903fb950c343efc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/871888ecbdd44e19a903fb950c343efc 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2600738e975a41899b75711082c9f2f8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2600738e975a41899b75711082c9f2f8 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-15 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/45de93ab564b4cf0b769194968f5a17d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/45de93ab564b4cf0b769194968f5a17d 2024-12-12T05:39:58,631 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bbfdd1a9334a43838ac54b083b6a3584 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bbfdd1a9334a43838ac54b083b6a3584 2024-12-12T05:39:58,633 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/6fbe1bb659f34696ac57c6b8240ae19a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/6fbe1bb659f34696ac57c6b8240ae19a 2024-12-12T05:39:58,633 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fcbc7b81b42d47dfbb2186fd707dbec6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fcbc7b81b42d47dfbb2186fd707dbec6 2024-12-12T05:39:58,633 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/1a2c2f419e934211a521b0015da8eadb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/1a2c2f419e934211a521b0015da8eadb 2024-12-12T05:39:58,633 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/02daf90390d343e993f24612f69ac4d0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/02daf90390d343e993f24612f69ac4d0 2024-12-12T05:39:58,633 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/5f8a12a01b07421f8048db32c07e0e54 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/5f8a12a01b07421f8048db32c07e0e54 2024-12-12T05:39:58,633 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2a98d6cb9a3044ab8f8152f0d5af484d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/2a98d6cb9a3044ab8f8152f0d5af484d 2024-12-12T05:39:58,634 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/17c97b56a81541cdb66dd5f7db4f7743 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/17c97b56a81541cdb66dd5f7db4f7743 2024-12-12T05:39:58,634 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bca5efa7719d47f4b65e0db2a0b8d209 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bca5efa7719d47f4b65e0db2a0b8d209 2024-12-12T05:39:58,634 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/8f82271716384adeb15abe0f276a8d75 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/8f82271716384adeb15abe0f276a8d75 2024-12-12T05:39:58,635 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0e8b9a331a594888aa45a3d70e99cde7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0e8b9a331a594888aa45a3d70e99cde7 2024-12-12T05:39:58,635 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9659928f870f430dadb91d5cde9ce6df to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9659928f870f430dadb91d5cde9ce6df 2024-12-12T05:39:58,635 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9296bb8f169749a3ae1fd2e46c345f1b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/9296bb8f169749a3ae1fd2e46c345f1b 2024-12-12T05:39:58,635 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fc70e03942e24bbba7ab550094c652a0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/fc70e03942e24bbba7ab550094c652a0 2024-12-12T05:39:58,636 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/3adc7ef63dab401e8bf8586a5f90b8c1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/3adc7ef63dab401e8bf8586a5f90b8c1 2024-12-12T05:39:58,636 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/308f3325b9714b96b8c8ffe510c6c517 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/308f3325b9714b96b8c8ffe510c6c517 2024-12-12T05:39:58,636 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a1b81c600ffc4f44b5be29ccbf8e8327 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a1b81c600ffc4f44b5be29ccbf8e8327 2024-12-12T05:39:58,636 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c20df96eb4a446cb9d4b6d4825d74691 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/c20df96eb4a446cb9d4b6d4825d74691 2024-12-12T05:39:58,637 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a505ca3bb82a4c36b504e636b5fb8d05 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/a505ca3bb82a4c36b504e636b5fb8d05 2024-12-12T05:39:58,640 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/recovered.edits/395.seqid, newMaxSeqId=395, maxSeqId=4 2024-12-12T05:39:58,641 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e. 
2024-12-12T05:39:58,641 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1635): Region close journal for 92b0b352d91bdb0e121a8902637d8c5e: 2024-12-12T05:39:58,642 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(170): Closed 92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,643 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=92b0b352d91bdb0e121a8902637d8c5e, regionState=CLOSED 2024-12-12T05:39:58,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-12T05:39:58,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseRegionProcedure 92b0b352d91bdb0e121a8902637d8c5e, server=83e80bf221ca,46457,1733981928566 in 1.8980 sec 2024-12-12T05:39:58,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-12-12T05:39:58,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=92b0b352d91bdb0e121a8902637d8c5e, UNASSIGN in 1.9040 sec 2024-12-12T05:39:58,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-12T05:39:58,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9080 sec 2024-12-12T05:39:58,647 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981998647"}]},"ts":"1733981998647"} 2024-12-12T05:39:58,648 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T05:39:58,686 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T05:39:58,687 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0020 sec 2024-12-12T05:39:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T05:39:58,795 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-12T05:39:58,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T05:39:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,797 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T05:39:58,798 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=69, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,800 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,804 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/recovered.edits] 2024-12-12T05:39:58,809 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0973b5413c4c440cb65ea01247c3d05b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/0973b5413c4c440cb65ea01247c3d05b 2024-12-12T05:39:58,810 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/1593f1fb25654843afa9b98c1cc4818f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/1593f1fb25654843afa9b98c1cc4818f 2024-12-12T05:39:58,810 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/e4231d1424a04c4db1f25095a73b17ee to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/A/e4231d1424a04c4db1f25095a73b17ee 2024-12-12T05:39:58,814 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/618fc6a0be6e463784093895f09cb2ac to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/618fc6a0be6e463784093895f09cb2ac 2024-12-12T05:39:58,814 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f1a4f701e2444e17b0e2bd21b6c5f499 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/f1a4f701e2444e17b0e2bd21b6c5f499 
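The HFileArchiver entries above and below all follow the same pattern: a store file under .../data/default/TestAcidGuarantees/<region>/<family>/<file> is moved to the mirrored location under .../archive/data/... before the region directory itself is deleted. A minimal sketch (not HBase's own HFileArchiver code) of that data-to-archive path mapping; the helper name toArchivePath is hypothetical:

import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {
  // rootDir: the HBase root dir, e.g. hdfs://localhost:45813/user/jenkins/test-data/<run-id>
  // storeFile: an absolute store-file path under <rootDir>/data/...
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // Relative portion after the root, e.g. data/default/TestAcidGuarantees/<region>/<cf>/<file>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    // Same layout, re-rooted under <rootDir>/archive/
    return new Path(new Path(rootDir, "archive"), relative);
  }
}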
2024-12-12T05:39:58,814 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/97ce618d596847b591bffde816aee60a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/B/97ce618d596847b591bffde816aee60a 2024-12-12T05:39:58,819 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bce152485428427989f4a18ce8abe020 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/bce152485428427989f4a18ce8abe020 2024-12-12T05:39:58,819 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/07195be185354406a09df55ebcdd3008 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/07195be185354406a09df55ebcdd3008 2024-12-12T05:39:58,819 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0dbe826aac214f36adee67f8968aae0a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/C/0dbe826aac214f36adee67f8968aae0a 2024-12-12T05:39:58,823 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/recovered.edits/395.seqid to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e/recovered.edits/395.seqid 2024-12-12T05:39:58,824 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,824 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T05:39:58,825 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T05:39:58,825 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T05:39:58,833 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121239f58ada9324486fad7d830984c3e400_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121239f58ada9324486fad7d830984c3e400_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,833 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212075bbd470fab43c5be0139bba50a4dfd_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212075bbd470fab43c5be0139bba50a4dfd_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,833 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121228d5a97220dd4f81aa97dacf3a5fefc3_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121228d5a97220dd4f81aa97dacf3a5fefc3_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,833 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f958f8eb4584d13bffdbefc8e26d191_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f958f8eb4584d13bffdbefc8e26d191_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,833 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123753626ead564870bd2ee12312fa554d_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123753626ead564870bd2ee12312fa554d_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,834 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121235f8567a0cff4aa6897c7e23d72a552f_92b0b352d91bdb0e121a8902637d8c5e to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121235f8567a0cff4aa6897c7e23d72a552f_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,834 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212658cedca716a4df491f8b2f60155f563_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212658cedca716a4df491f8b2f60155f563_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,834 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124572fdd38232445895f628be06ab19ab_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124572fdd38232445895f628be06ab19ab_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212734deb8c84284ab1931357771c125a9c_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212734deb8c84284ab1931357771c125a9c_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127f6d5ac5bc8849959546b2237067110c_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127f6d5ac5bc8849959546b2237067110c_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212866c08560a7d4848b4089ba412c26cad_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212866c08560a7d4848b4089ba412c26cad_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212920e80c43b634c4091f35c224610c33a_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212920e80c43b634c4091f35c224610c33a_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127d0b28bc6eb549cbb0889e476984a044_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127d0b28bc6eb549cbb0889e476984a044_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121297aeddb2ff1c4bbcb7ec854d9cbb74f6_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121297aeddb2ff1c4bbcb7ec854d9cbb74f6_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b6cccb37509f403aa0f7b7668a5f1a1c_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b6cccb37509f403aa0f7b7668a5f1a1c_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,835 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c7ceceb43b064554acf1a92aae86ff1f_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c7ceceb43b064554acf1a92aae86ff1f_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,836 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ca2f9c4b05a4423888d5b6c5a827aa90_92b0b352d91bdb0e121a8902637d8c5e to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ca2f9c4b05a4423888d5b6c5a827aa90_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,836 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d4eb1dc6bec64c4cb4d99117495515fb_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d4eb1dc6bec64c4cb4d99117495515fb_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,836 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e220c3c506a14742b024ada4621718a3_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e220c3c506a14742b024ada4621718a3_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,836 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212eda36a7a59b847fb976dd5002aaa5987_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212eda36a7a59b847fb976dd5002aaa5987_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,836 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f4a6308ee6d7467fa7300f472353c376_92b0b352d91bdb0e121a8902637d8c5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f4a6308ee6d7467fa7300f472353c376_92b0b352d91bdb0e121a8902637d8c5e 2024-12-12T05:39:58,837 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T05:39:58,838 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=69, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,841 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T05:39:58,843 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-12T05:39:58,844 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=69, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,844 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T05:39:58,844 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733981998844"}]},"ts":"9223372036854775807"} 2024-12-12T05:39:58,846 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T05:39:58,846 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 92b0b352d91bdb0e121a8902637d8c5e, NAME => 'TestAcidGuarantees,,1733981963161.92b0b352d91bdb0e121a8902637d8c5e.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T05:39:58,846 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T05:39:58,846 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733981998846"}]},"ts":"9223372036854775807"} 2024-12-12T05:39:58,847 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T05:39:58,853 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=69, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 58 msec 2024-12-12T05:39:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T05:39:58,899 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-12T05:39:58,908 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=244 (was 246), OpenFileDescriptor=451 (was 454), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=268 (was 257) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=13318 (was 13473) 2024-12-12T05:39:58,916 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=244, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=268, ProcessCount=11, AvailableMemoryMB=13318 2024-12-12T05:39:58,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
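The DISABLE and DELETE operations reported as completed above (procId 65 and 69) correspond to two standard Admin calls on the client side. A minimal sketch of that sequence using the HBase 2.x Admin API; this is not the test's own code, and the configuration is assumed to point at the mini-cluster from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.disableTable(table);  // drives the DisableTableProcedure seen above (pid=65)
      admin.deleteTable(table);   // drives the DeleteTableProcedure seen above (pid=69)
    }
  }
}

Both calls block until the corresponding master procedure finishes, which matches the repeated "Checking to see if procedure is done" polls in the log.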
2024-12-12T05:39:58,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:39:58,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T05:39:58,919 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:39:58,919 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:39:58,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 70 2024-12-12T05:39:58,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-12-12T05:39:58,920 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:39:58,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742078_1254 (size=963) 2024-12-12T05:39:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-12-12T05:39:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-12-12T05:39:59,332 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:39:59,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742079_1255 (size=53) 2024-12-12T05:39:59,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-12-12T05:39:59,744 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:39:59,744 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7b5eb541c096811fd997fc2b7e27d07f, disabling compactions & flushes 2024-12-12T05:39:59,744 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:39:59,744 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:39:59,744 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. after waiting 0 ms 2024-12-12T05:39:59,744 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:39:59,744 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
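The table descriptor printed above (three families A/B/C with VERSIONS => '1' and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE') can be built with the HBase 2.x descriptor builders. A minimal sketch of the equivalent createTable call; this mirrors the attributes in the log rather than reproducing the test's own helper:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateTableSketch {
  static void createAcidTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata selecting the ADAPTIVE in-memory compaction policy
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)  // VERSIONS => '1' in the descriptor above
              .build());
    }
    admin.createTable(builder.build());  // drives CreateTableProcedure (pid=70 above)
  }
}

The same policy can also be requested per family with ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE); this run sets it once at table level, which is why every store opened below logs compactor=ADAPTIVE.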
2024-12-12T05:39:59,745 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:39:59,747 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:39:59,748 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733981999748"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733981999748"}]},"ts":"1733981999748"} 2024-12-12T05:39:59,751 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:39:59,752 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:39:59,752 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733981999752"}]},"ts":"1733981999752"} 2024-12-12T05:39:59,752 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T05:39:59,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, ASSIGN}] 2024-12-12T05:39:59,805 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, ASSIGN 2024-12-12T05:39:59,806 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:39:59,956 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=7b5eb541c096811fd997fc2b7e27d07f, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:39:59,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; OpenRegionProcedure 7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-12-12T05:40:00,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:00,114 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:00,114 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7285): Opening region: {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:40:00,115 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,115 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:40:00,115 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7327): checking encryption for 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,115 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7330): checking classloading for 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,118 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,121 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:00,121 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b5eb541c096811fd997fc2b7e27d07f columnFamilyName A 2024-12-12T05:40:00,121 DEBUG [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:00,122 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.HStore(327): Store=7b5eb541c096811fd997fc2b7e27d07f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:00,122 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,124 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:00,125 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b5eb541c096811fd997fc2b7e27d07f columnFamilyName B 2024-12-12T05:40:00,125 DEBUG [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:00,126 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.HStore(327): Store=7b5eb541c096811fd997fc2b7e27d07f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:00,126 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,127 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:00,127 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b5eb541c096811fd997fc2b7e27d07f columnFamilyName C 2024-12-12T05:40:00,128 DEBUG [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:00,128 INFO [StoreOpener-7b5eb541c096811fd997fc2b7e27d07f-1 {}] regionserver.HStore(327): Store=7b5eb541c096811fd997fc2b7e27d07f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:00,128 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:00,129 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,130 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,132 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:40:00,134 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1085): writing seq id for 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:00,136 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:40:00,137 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1102): Opened 7b5eb541c096811fd997fc2b7e27d07f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74703382, jitterRate=0.11316713690757751}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:40:00,138 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1001): Region open journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:00,139 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., pid=72, masterSystemTime=1733982000109 2024-12-12T05:40:00,141 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:00,141 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
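The open sequence above creates region 7b5eb541c096811fd997fc2b7e27d07f with three column families (A, B and C), each backed by a CompactingMemStore (in-memory flush threshold 2.00 MB, compactor=ADAPTIVE), the default store file tracker, and no compression or block encoding. As a point of reference only, the following is a minimal sketch of how a table with that shape could be declared through the standard HBase 2.x client API; the test harness builds its schema through its own utilities, so the class name, connection setup and the explicit ADAPTIVE policy below are illustrative assumptions, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath pointing at the test cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Three families, each with in-memory compaction enabled, mirroring the
          // logged CompactingMemStore / compactor=ADAPTIVE store configuration.
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }

Nothing in the log confirms whether the policy is set per family or inherited from cluster defaults; the builder call is shown only to make the logged store configuration concrete.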
2024-12-12T05:40:00,141 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=7b5eb541c096811fd997fc2b7e27d07f, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:00,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-12T05:40:00,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; OpenRegionProcedure 7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 in 184 msec 2024-12-12T05:40:00,146 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=71, resume processing ppid=70 2024-12-12T05:40:00,146 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, ppid=70, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, ASSIGN in 341 msec 2024-12-12T05:40:00,146 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:40:00,146 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982000146"}]},"ts":"1733982000146"} 2024-12-12T05:40:00,147 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T05:40:00,162 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:40:00,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2440 sec 2024-12-12T05:40:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-12-12T05:40:01,026 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 70 completed 2024-12-12T05:40:01,031 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-12-12T05:40:01,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,072 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,074 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,075 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:40:01,076 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52592, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:40:01,080 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-12-12T05:40:01,087 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,088 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-12-12T05:40:01,095 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,096 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-12-12T05:40:01,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-12-12T05:40:01,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,114 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-12-12T05:40:01,120 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,121 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-12-12T05:40:01,128 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,129 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328852db to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1730a60f 2024-12-12T05:40:01,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3677bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,138 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b9e2976 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@598cfed4 2024-12-12T05:40:01,145 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521aad6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,146 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56e9a678 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68ad882f 2024-12-12T05:40:01,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f5b2180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,154 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f7f772a to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b976e1a 2024-12-12T05:40:01,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1df61dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:01,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:01,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-12T05:40:01,165 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:01,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T05:40:01,166 DEBUG [hconnection-0x6e8ccc93-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,166 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:01,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:01,166 DEBUG [hconnection-0x3c14ba38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,166 DEBUG [hconnection-0x255042f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,167 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,167 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,167 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,175 DEBUG [hconnection-0x7673526d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,176 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,179 DEBUG [hconnection-0x58a9cdd1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,180 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,187 DEBUG [hconnection-0x7fe84ace-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,188 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,191 DEBUG [hconnection-0x23f51866-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,192 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,195 DEBUG [hconnection-0x7e4f31e3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,195 DEBUG [hconnection-0xfe46222-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:01,196 DEBUG [hconnection-0x391a311d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
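A few entries above, once the CREATE procedure (procId 70) completes, the client asks the master to flush the table ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the master stores FlushTableProcedure pid=73 with a FlushRegionProcedure subprocedure pid=74, and the client then polls "Checking to see if procedure is done pid=73" until it finishes. From the caller's side that whole exchange is a single Admin call. The sketch below is an assumed, self-contained equivalent (the ZooKeeper address is taken from this log; the class name and explicit configuration are hypothetical), not the test's own driver code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The mini-cluster in this log runs ZooKeeper on 127.0.0.1:60303;
        // in a real deployment these would point at the cluster's quorum.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "60303");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the table flush completes; internally the master runs the
          // flush as a procedure and the client polls for its result, which is the
          // "Checking to see if procedure is done pid=73" chatter in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }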
2024-12-12T05:40:01,196 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,196 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,196 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:01,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:01,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:40:01,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:01,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:01,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:01,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/0e885bcb1b9047ddb992db34c013d139 is 50, key is test_row_0/A:col10/1733982001213/Put/seqid=0 2024-12-12T05:40:01,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982061236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982061236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982061236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982061238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982061238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742080_1256 (size=12001) 2024-12-12T05:40:01,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/0e885bcb1b9047ddb992db34c013d139 2024-12-12T05:40:01,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T05:40:01,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c16432ff81c04e69afe0d4a3798bd1da is 50, key is test_row_0/B:col10/1733982001213/Put/seqid=0 2024-12-12T05:40:01,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742081_1257 (size=12001) 2024-12-12T05:40:01,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c16432ff81c04e69afe0d4a3798bd1da 2024-12-12T05:40:01,317 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T05:40:01,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:01,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:01,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
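The burst of RegionTooBusyException warnings around this point is server-side backpressure: the region's memstore has reached its blocking limit ("Over memstore limit=512.0 K" in this test configuration), so incoming mutations are rejected until the flush already in progress frees memory, and callers are expected to retry. The stock HBase client treats RegionTooBusyException as retryable and normally backs off and retries on its own (governed by settings such as hbase.client.retries.number and hbase.client.pause), surfacing a failure only if the region stays busy past the retry budget. The explicit loop below is only a sketch of that retry-on-busy behaviour, with made-up retry counts, row and value names; it is not how the test's load generators are written, and in practice the unwrapped exception may instead arrive wrapped by the client's own retry machinery.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackpressureAwarePut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Hypothetical explicit retry loop; the regular client already retries
          // RegionTooBusyException internally with exponential backoff.
          int attempts = 0;
          while (true) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (++attempts >= 5) {
                throw e; // give up after a few attempts in this sketch
              }
              Thread.sleep(100L * attempts); // crude linear backoff between attempts
            }
          }
        }
      }
    }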
2024-12-12T05:40:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:01,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/e42b0c386ce24ae99437d76611fff896 is 50, key is test_row_0/C:col10/1733982001213/Put/seqid=0 2024-12-12T05:40:01,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982061342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982061342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982061342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982061342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982061342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742082_1258 (size=12001) 2024-12-12T05:40:01,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/e42b0c386ce24ae99437d76611fff896 2024-12-12T05:40:01,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/0e885bcb1b9047ddb992db34c013d139 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/0e885bcb1b9047ddb992db34c013d139 2024-12-12T05:40:01,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/0e885bcb1b9047ddb992db34c013d139, entries=150, sequenceid=13, filesize=11.7 K 2024-12-12T05:40:01,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c16432ff81c04e69afe0d4a3798bd1da as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c16432ff81c04e69afe0d4a3798bd1da 2024-12-12T05:40:01,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c16432ff81c04e69afe0d4a3798bd1da, entries=150, sequenceid=13, filesize=11.7 K 2024-12-12T05:40:01,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/e42b0c386ce24ae99437d76611fff896 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e42b0c386ce24ae99437d76611fff896 2024-12-12T05:40:01,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e42b0c386ce24ae99437d76611fff896, entries=150, sequenceid=13, filesize=11.7 K 2024-12-12T05:40:01,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7b5eb541c096811fd997fc2b7e27d07f in 161ms, sequenceid=13, compaction requested=false 2024-12-12T05:40:01,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:01,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T05:40:01,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T05:40:01,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,471 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:40:01,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:01,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:01,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:01,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/c1e889c5d91d4a71a5d29a8edbc7fb69 is 50, key is test_row_0/A:col10/1733982001236/Put/seqid=0 2024-12-12T05:40:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742083_1259 (size=12001) 2024-12-12T05:40:01,506 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/c1e889c5d91d4a71a5d29a8edbc7fb69 2024-12-12T05:40:01,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8498cf857dd249e285377f5559421ff9 is 50, key is test_row_0/B:col10/1733982001236/Put/seqid=0 2024-12-12T05:40:01,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742084_1260 (size=12001) 2024-12-12T05:40:01,522 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8498cf857dd249e285377f5559421ff9 2024-12-12T05:40:01,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b0356f9fe30447b0a19ff7f1156ce9aa is 50, key is test_row_0/C:col10/1733982001236/Put/seqid=0 2024-12-12T05:40:01,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742085_1261 (size=12001) 2024-12-12T05:40:01,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
as already flushing 2024-12-12T05:40:01,547 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b0356f9fe30447b0a19ff7f1156ce9aa 2024-12-12T05:40:01,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:01,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/c1e889c5d91d4a71a5d29a8edbc7fb69 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c1e889c5d91d4a71a5d29a8edbc7fb69 2024-12-12T05:40:01,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982061550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982061550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982061553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982061553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982061553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,559 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c1e889c5d91d4a71a5d29a8edbc7fb69, entries=150, sequenceid=37, filesize=11.7 K 2024-12-12T05:40:01,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8498cf857dd249e285377f5559421ff9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8498cf857dd249e285377f5559421ff9 2024-12-12T05:40:01,567 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8498cf857dd249e285377f5559421ff9, entries=150, sequenceid=37, filesize=11.7 K 2024-12-12T05:40:01,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b0356f9fe30447b0a19ff7f1156ce9aa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b0356f9fe30447b0a19ff7f1156ce9aa 2024-12-12T05:40:01,572 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b0356f9fe30447b0a19ff7f1156ce9aa, entries=150, sequenceid=37, filesize=11.7 K 2024-12-12T05:40:01,573 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7b5eb541c096811fd997fc2b7e27d07f in 102ms, sequenceid=37, 
compaction requested=false 2024-12-12T05:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-12T05:40:01,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-12T05:40:01,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-12T05:40:01,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 409 msec 2024-12-12T05:40:01,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 413 msec 2024-12-12T05:40:01,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:01,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:40:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/f499c03d28aa4f06827e8819002d1c70 is 50, key is test_row_0/A:col10/1733982001655/Put/seqid=0 2024-12-12T05:40:01,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742086_1262 (size=16681) 2024-12-12T05:40:01,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/f499c03d28aa4f06827e8819002d1c70 2024-12-12T05:40:01,675 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/3c5d21f7b72c4d07badbf816b76e7af8 is 50, key is test_row_0/B:col10/1733982001655/Put/seqid=0 2024-12-12T05:40:01,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982061678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982061678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982061679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982061679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742087_1263 (size=12001) 2024-12-12T05:40:01,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982061685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/3c5d21f7b72c4d07badbf816b76e7af8 2024-12-12T05:40:01,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/3137cdc9907c4511991f83c39108b981 is 50, key is test_row_0/C:col10/1733982001655/Put/seqid=0 2024-12-12T05:40:01,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742088_1264 (size=12001) 2024-12-12T05:40:01,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/3137cdc9907c4511991f83c39108b981 2024-12-12T05:40:01,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/f499c03d28aa4f06827e8819002d1c70 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/f499c03d28aa4f06827e8819002d1c70 2024-12-12T05:40:01,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/f499c03d28aa4f06827e8819002d1c70, entries=250, sequenceid=51, filesize=16.3 K 2024-12-12T05:40:01,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/3c5d21f7b72c4d07badbf816b76e7af8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/3c5d21f7b72c4d07badbf816b76e7af8 2024-12-12T05:40:01,752 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/3c5d21f7b72c4d07badbf816b76e7af8, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T05:40:01,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/3137cdc9907c4511991f83c39108b981 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3137cdc9907c4511991f83c39108b981 2024-12-12T05:40:01,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3137cdc9907c4511991f83c39108b981, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T05:40:01,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 7b5eb541c096811fd997fc2b7e27d07f in 103ms, sequenceid=51, compaction requested=true 2024-12-12T05:40:01,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:01,759 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:01,760 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:01,761 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:01,761 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 
7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files) 2024-12-12T05:40:01,761 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,761 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/0e885bcb1b9047ddb992db34c013d139, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c1e889c5d91d4a71a5d29a8edbc7fb69, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/f499c03d28aa4f06827e8819002d1c70] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=39.7 K 2024-12-12T05:40:01,761 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:01,762 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files) 2024-12-12T05:40:01,762 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e885bcb1b9047ddb992db34c013d139, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733982001210 2024-12-12T05:40:01,762 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:01,762 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c16432ff81c04e69afe0d4a3798bd1da, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8498cf857dd249e285377f5559421ff9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/3c5d21f7b72c4d07badbf816b76e7af8] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=35.2 K 2024-12-12T05:40:01,763 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c16432ff81c04e69afe0d4a3798bd1da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733982001210 2024-12-12T05:40:01,763 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1e889c5d91d4a71a5d29a8edbc7fb69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733982001230 2024-12-12T05:40:01,763 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f499c03d28aa4f06827e8819002d1c70, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733982001551 2024-12-12T05:40:01,763 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8498cf857dd249e285377f5559421ff9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733982001230 2024-12-12T05:40:01,764 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c5d21f7b72c4d07badbf816b76e7af8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733982001551 2024-12-12T05:40:01,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T05:40:01,768 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-12T05:40:01,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:01,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-12T05:40:01,771 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:01,771 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:01,771 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:01,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:01,777 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:01,778 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/d20b45abc55647cabfc63a1abae2aff4 is 50, key is test_row_0/A:col10/1733982001655/Put/seqid=0 2024-12-12T05:40:01,780 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#217 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:01,781 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/0e05a93d04034f5db25c7bfe859cc3b9 is 50, key is test_row_0/B:col10/1733982001655/Put/seqid=0 2024-12-12T05:40:01,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:01,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:40:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:01,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:01,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/b9b5b8517a7b4635a7d72c6366956aee is 50, key is test_row_0/A:col10/1733982001786/Put/seqid=0 2024-12-12T05:40:01,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982061793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982061794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742090_1266 (size=12104) 2024-12-12T05:40:01,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982061795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982061796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742089_1265 (size=12104) 2024-12-12T05:40:01,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982061805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742091_1267 (size=12001) 2024-12-12T05:40:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:01,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982061899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982061901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982061901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982061904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982061908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:01,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T05:40:01,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:01,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:01,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:01,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:02,078 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T05:40:02,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:02,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:02,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982062103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982062105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982062105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982062108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982062111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,206 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/0e05a93d04034f5db25c7bfe859cc3b9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e05a93d04034f5db25c7bfe859cc3b9 2024-12-12T05:40:02,209 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/d20b45abc55647cabfc63a1abae2aff4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d20b45abc55647cabfc63a1abae2aff4 2024-12-12T05:40:02,210 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into 0e05a93d04034f5db25c7bfe859cc3b9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:02,210 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:02,210 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=13, startTime=1733982001760; duration=0sec 2024-12-12T05:40:02,210 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:02,210 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:02,210 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:02,211 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:02,211 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:02,211 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,212 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e42b0c386ce24ae99437d76611fff896, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b0356f9fe30447b0a19ff7f1156ce9aa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3137cdc9907c4511991f83c39108b981] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=35.2 K 2024-12-12T05:40:02,212 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e42b0c386ce24ae99437d76611fff896, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733982001210 2024-12-12T05:40:02,212 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b0356f9fe30447b0a19ff7f1156ce9aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733982001230 2024-12-12T05:40:02,213 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3137cdc9907c4511991f83c39108b981, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733982001551 2024-12-12T05:40:02,213 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) 
file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into d20b45abc55647cabfc63a1abae2aff4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:02,213 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:02,213 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=13, startTime=1733982001759; duration=0sec 2024-12-12T05:40:02,214 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:02,214 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:02,219 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#219 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:02,219 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/f61154f9f1a2468ca138373d718e6baa is 50, key is test_row_0/C:col10/1733982001655/Put/seqid=0 2024-12-12T05:40:02,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/b9b5b8517a7b4635a7d72c6366956aee 2024-12-12T05:40:02,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742092_1268 (size=12104) 2024-12-12T05:40:02,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c7cee429460c4eb5a2ed20a03bee8f22 is 50, key is test_row_0/B:col10/1733982001786/Put/seqid=0 2024-12-12T05:40:02,230 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/f61154f9f1a2468ca138373d718e6baa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f61154f9f1a2468ca138373d718e6baa 2024-12-12T05:40:02,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T05:40:02,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:02,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:02,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,237 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into f61154f9f1a2468ca138373d718e6baa(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:02,237 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:02,237 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=13, startTime=1733982001761; duration=0sec 2024-12-12T05:40:02,237 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:02,237 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C 2024-12-12T05:40:02,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742093_1269 (size=12001) 2024-12-12T05:40:02,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c7cee429460c4eb5a2ed20a03bee8f22 2024-12-12T05:40:02,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/0a955b1ebc664a8b9311c292233aeba6 is 50, key is test_row_0/C:col10/1733982001786/Put/seqid=0 2024-12-12T05:40:02,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742094_1270 (size=12001) 2024-12-12T05:40:02,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:02,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T05:40:02,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:02,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:02,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982062404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982062408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982062409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982062410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982062414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T05:40:02,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:02,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:02,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:02,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/0a955b1ebc664a8b9311c292233aeba6 2024-12-12T05:40:02,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/b9b5b8517a7b4635a7d72c6366956aee as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/b9b5b8517a7b4635a7d72c6366956aee 2024-12-12T05:40:02,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/b9b5b8517a7b4635a7d72c6366956aee, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T05:40:02,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c7cee429460c4eb5a2ed20a03bee8f22 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c7cee429460c4eb5a2ed20a03bee8f22 2024-12-12T05:40:02,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c7cee429460c4eb5a2ed20a03bee8f22, entries=150, sequenceid=77, 
filesize=11.7 K 2024-12-12T05:40:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/0a955b1ebc664a8b9311c292233aeba6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0a955b1ebc664a8b9311c292233aeba6 2024-12-12T05:40:02,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0a955b1ebc664a8b9311c292233aeba6, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T05:40:02,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 7b5eb541c096811fd997fc2b7e27d07f in 893ms, sequenceid=77, compaction requested=false 2024-12-12T05:40:02,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:02,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T05:40:02,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:02,691 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:40:02,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:02,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:02,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4fba4c77a3734f3ebb1b90bb9cce4074 is 50, key is test_row_1/A:col10/1733982001794/Put/seqid=0 2024-12-12T05:40:02,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742095_1271 (size=9657) 2024-12-12T05:40:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:02,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:02,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982062924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982062924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982062924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982062926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:02,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:02,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982062927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982063028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982063028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982063029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982063030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982063031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,099 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4fba4c77a3734f3ebb1b90bb9cce4074 2024-12-12T05:40:03,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/f2d30aac4f1a497d93d86d5f221c5da4 is 50, key is test_row_1/B:col10/1733982001794/Put/seqid=0 2024-12-12T05:40:03,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742096_1272 (size=9657) 2024-12-12T05:40:03,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982063231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982063231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982063232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982063232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982063233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,513 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/f2d30aac4f1a497d93d86d5f221c5da4 2024-12-12T05:40:03,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b3463fd0a0c94c3da7fd062f1de9e726 is 50, key is test_row_1/C:col10/1733982001794/Put/seqid=0 2024-12-12T05:40:03,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742097_1273 (size=9657) 2024-12-12T05:40:03,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982063534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982063534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982063534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982063535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:03,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982063535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:03,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:03,923 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b3463fd0a0c94c3da7fd062f1de9e726 2024-12-12T05:40:03,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4fba4c77a3734f3ebb1b90bb9cce4074 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4fba4c77a3734f3ebb1b90bb9cce4074 2024-12-12T05:40:03,929 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4fba4c77a3734f3ebb1b90bb9cce4074, entries=100, sequenceid=91, filesize=9.4 K 2024-12-12T05:40:03,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/f2d30aac4f1a497d93d86d5f221c5da4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/f2d30aac4f1a497d93d86d5f221c5da4 2024-12-12T05:40:03,934 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/f2d30aac4f1a497d93d86d5f221c5da4, entries=100, sequenceid=91, filesize=9.4 K 2024-12-12T05:40:03,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b3463fd0a0c94c3da7fd062f1de9e726 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b3463fd0a0c94c3da7fd062f1de9e726 2024-12-12T05:40:03,939 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b3463fd0a0c94c3da7fd062f1de9e726, entries=100, sequenceid=91, filesize=9.4 K 2024-12-12T05:40:03,940 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b5eb541c096811fd997fc2b7e27d07f in 1248ms, sequenceid=91, compaction requested=true 2024-12-12T05:40:03,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:03,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:03,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-12T05:40:03,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-12T05:40:03,942 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-12T05:40:03,942 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1700 sec 2024-12-12T05:40:03,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.1730 sec 2024-12-12T05:40:04,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:04,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T05:40:04,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:04,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:04,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:04,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:04,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:04,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:04,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/c297fc4f20da4b51a5ae8b7cb5a663c3 is 50, key is test_row_0/A:col10/1733982002925/Put/seqid=0 2024-12-12T05:40:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742098_1274 (size=14341) 2024-12-12T05:40:04,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982064044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982064044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982064044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982064045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982064046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982064148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982064148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982064148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982064148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982064148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,172 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T05:40:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982064350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982064351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982064351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982064351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982064352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/c297fc4f20da4b51a5ae8b7cb5a663c3 2024-12-12T05:40:04,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/11285ba0475a4ab298779786f2205387 is 50, key is test_row_0/B:col10/1733982002925/Put/seqid=0 2024-12-12T05:40:04,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742099_1275 (size=12001) 2024-12-12T05:40:04,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/11285ba0475a4ab298779786f2205387 2024-12-12T05:40:04,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/bcc2db790fc14a16af140100bb9c14f3 is 50, key is test_row_0/C:col10/1733982002925/Put/seqid=0 2024-12-12T05:40:04,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742100_1276 (size=12001) 2024-12-12T05:40:04,654 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982064653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982064653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982064654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982064654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:04,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982064654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:04,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/bcc2db790fc14a16af140100bb9c14f3 2024-12-12T05:40:04,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/c297fc4f20da4b51a5ae8b7cb5a663c3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c297fc4f20da4b51a5ae8b7cb5a663c3 2024-12-12T05:40:04,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c297fc4f20da4b51a5ae8b7cb5a663c3, entries=200, sequenceid=118, filesize=14.0 K 2024-12-12T05:40:04,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/11285ba0475a4ab298779786f2205387 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/11285ba0475a4ab298779786f2205387 2024-12-12T05:40:04,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/11285ba0475a4ab298779786f2205387, entries=150, sequenceid=118, filesize=11.7 K 2024-12-12T05:40:04,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/bcc2db790fc14a16af140100bb9c14f3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bcc2db790fc14a16af140100bb9c14f3 2024-12-12T05:40:04,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bcc2db790fc14a16af140100bb9c14f3, entries=150, sequenceid=118, filesize=11.7 K
2024-12-12T05:40:04,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 7b5eb541c096811fd997fc2b7e27d07f in 867ms, sequenceid=118, compaction requested=true
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f:
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:40:04,905 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:40:04,905 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3
2024-12-12T05:40:04,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T05:40:04,906 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48103 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-12T05:40:04,906 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-12T05:40:04,906 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files)
2024-12-12T05:40:04,906 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files)
2024-12-12T05:40:04,906 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.
2024-12-12T05:40:04,906 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:04,906 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d20b45abc55647cabfc63a1abae2aff4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/b9b5b8517a7b4635a7d72c6366956aee, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4fba4c77a3734f3ebb1b90bb9cce4074, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c297fc4f20da4b51a5ae8b7cb5a663c3] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=47.0 K 2024-12-12T05:40:04,906 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e05a93d04034f5db25c7bfe859cc3b9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c7cee429460c4eb5a2ed20a03bee8f22, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/f2d30aac4f1a497d93d86d5f221c5da4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/11285ba0475a4ab298779786f2205387] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=44.7 K 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d20b45abc55647cabfc63a1abae2aff4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733982001551 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e05a93d04034f5db25c7bfe859cc3b9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733982001551 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c7cee429460c4eb5a2ed20a03bee8f22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733982001679 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9b5b8517a7b4635a7d72c6366956aee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733982001679 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f2d30aac4f1a497d93d86d5f221c5da4, keycount=100, 
bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982001794 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fba4c77a3734f3ebb1b90bb9cce4074, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982001794 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 11285ba0475a4ab298779786f2205387, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733982002925 2024-12-12T05:40:04,907 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c297fc4f20da4b51a5ae8b7cb5a663c3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733982002922 2024-12-12T05:40:04,915 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:04,915 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c08f42e3fcfb4630b49c1edf7964d4de is 50, key is test_row_0/B:col10/1733982002925/Put/seqid=0 2024-12-12T05:40:04,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742101_1277 (size=12241) 2024-12-12T05:40:04,926 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#229 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:04,926 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/79efab4ee8524a0ea816806b6bc7c2c1 is 50, key is test_row_0/A:col10/1733982002925/Put/seqid=0 2024-12-12T05:40:04,929 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c08f42e3fcfb4630b49c1edf7964d4de as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c08f42e3fcfb4630b49c1edf7964d4de 2024-12-12T05:40:04,934 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into c08f42e3fcfb4630b49c1edf7964d4de(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:04,934 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:04,934 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=12, startTime=1733982004905; duration=0sec 2024-12-12T05:40:04,934 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:04,934 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:04,934 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:40:04,935 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:40:04,935 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:04,936 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:04,936 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f61154f9f1a2468ca138373d718e6baa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0a955b1ebc664a8b9311c292233aeba6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b3463fd0a0c94c3da7fd062f1de9e726, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bcc2db790fc14a16af140100bb9c14f3] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=44.7 K 2024-12-12T05:40:04,936 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f61154f9f1a2468ca138373d718e6baa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733982001551 2024-12-12T05:40:04,936 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a955b1ebc664a8b9311c292233aeba6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733982001679 2024-12-12T05:40:04,936 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b3463fd0a0c94c3da7fd062f1de9e726, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, 
compression=NONE, seqNum=91, earliestPutTs=1733982001794 2024-12-12T05:40:04,937 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting bcc2db790fc14a16af140100bb9c14f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733982002925 2024-12-12T05:40:04,949 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#230 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:04,949 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/98b56d75e3f049ed88120885242850bb is 50, key is test_row_0/C:col10/1733982002925/Put/seqid=0 2024-12-12T05:40:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742102_1278 (size=12241) 2024-12-12T05:40:04,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742103_1279 (size=12241) 2024-12-12T05:40:04,974 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/98b56d75e3f049ed88120885242850bb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/98b56d75e3f049ed88120885242850bb 2024-12-12T05:40:04,978 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into 98b56d75e3f049ed88120885242850bb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:04,978 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f:
2024-12-12T05:40:04,978 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=12, startTime=1733982004905; duration=0sec
2024-12-12T05:40:04,978 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:40:04,978 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C
2024-12-12T05:40:05,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f
2024-12-12T05:40:05,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-12T05:40:05,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A
2024-12-12T05:40:05,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:40:05,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B
2024-12-12T05:40:05,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:40:05,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C
2024-12-12T05:40:05,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:40:05,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982065170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982065170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982065170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982065171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982065173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/848b1dd989224999ac8d2deabb6461ae is 50, key is test_row_0/A:col10/1733982005155/Put/seqid=0 2024-12-12T05:40:05,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742104_1280 (size=12101) 2024-12-12T05:40:05,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982065274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982065274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982065274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982065274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982065277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,357 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/79efab4ee8524a0ea816806b6bc7c2c1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79efab4ee8524a0ea816806b6bc7c2c1 2024-12-12T05:40:05,361 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into 79efab4ee8524a0ea816806b6bc7c2c1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
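The repeated WARN/DEBUG records above show HRegion.checkResources rejecting Mutate RPCs with RegionTooBusyException ("Over memstore limit=512.0 K") while the MemStoreFlusher is still draining the region. As a minimal, hedged Java sketch only (not part of the test log): one way a caller could back off and retry such writes is shown below. The table name and column family mirror names seen in the log, but the row, value, and retry parameters are assumptions, and the stock HBase client normally retries this exception internally anyway.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Column family "A" matches one of the stores flushed in the log; value is arbitrary.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;              // assumed starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                // may be rejected while the region blocks updates
              break;
            } catch (RegionTooBusyException busy) {
              // Region is over its memstore blocking limit; wait for the flush to catch up.
              Thread.sleep(backoffMs);
              backoffMs *= 2;                // simple exponential backoff
            }
          }
        }
      }
    }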
2024-12-12T05:40:05,361 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:05,361 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=12, startTime=1733982004905; duration=0sec 2024-12-12T05:40:05,361 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:05,361 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:05,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982065476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982065477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982065477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982065477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982065480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/848b1dd989224999ac8d2deabb6461ae 2024-12-12T05:40:05,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4a5e12392cb94711aea8af66049c0e8d is 50, key is test_row_0/B:col10/1733982005155/Put/seqid=0 2024-12-12T05:40:05,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742105_1281 (size=12101) 2024-12-12T05:40:05,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982065780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982065781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982065781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982065782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:05,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982065783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T05:40:05,877 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-12T05:40:05,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-12T05:40:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T05:40:05,879 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:05,879 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:05,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:05,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=77 2024-12-12T05:40:06,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4a5e12392cb94711aea8af66049c0e8d 2024-12-12T05:40:06,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/f48ad361d8964a79adb9fd0840990a19 is 50, key is test_row_0/C:col10/1733982005155/Put/seqid=0 2024-12-12T05:40:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742106_1282 (size=12101) 2024-12-12T05:40:06,030 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T05:40:06,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:06,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
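The "Over memstore limit=512.0 K" figure in these records is the per-region blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the TestAcidGuarantees run appears to use a deliberately small flush size so that flushing and blocking trigger quickly. The sketch below is illustrative only: the specific numbers are assumptions chosen to reproduce a 512 K limit, not the exact settings of this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
        // matching the "Over memstore limit=512.0 K" seen in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("Per-region blocking limit: " + blockingLimit + " bytes");
      }
    }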
2024-12-12T05:40:06,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T05:40:06,185 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T05:40:06,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:06,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982066282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982066284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982066284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982066284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982066285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T05:40:06,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:06,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
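[Editor's note] The repeated "Over memstore limit=512.0 K" warnings above are HBase's write back-pressure: HRegion.checkResources rejects Mutate calls while the region's memstore is above its blocking size (normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; this test evidently runs with a very small limit), and the concurrent FlushRegionCallable fails because the region is "already flushing". For orientation only, below is a minimal client-side sketch of tolerating that signal with a retry/backoff loop. It is not part of the test's own code; the retry count, backoff values and cell value are illustrative, while the table, row, family and qualifier names are taken from the log above. Note the stock HBase client already retries such failures internally, so the exception may arrive wrapped.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A", qualifier "col10", as seen in the flush output in this log.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                  // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                    // normal write path (the Mutate RPC above)
          break;                             // write accepted
        } catch (IOException e) {
          // The server-side RegionTooBusyException may surface directly or as the
          // cause of a wrapper once the client's own retries are exhausted.
          if (!(e instanceof RegionTooBusyException)
              && !(e.getCause() instanceof RegionTooBusyException)) {
            throw e;                         // not memstore back-pressure; rethrow
          }
          Thread.sleep(backoffMs);           // give the flush time to drain the memstore
          backoffMs *= 2;                    // simple exponential backoff
        }
      }
    }
  }
}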
2024-12-12T05:40:06,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:06,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/f48ad361d8964a79adb9fd0840990a19 2024-12-12T05:40:06,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/848b1dd989224999ac8d2deabb6461ae as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/848b1dd989224999ac8d2deabb6461ae 2024-12-12T05:40:06,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/848b1dd989224999ac8d2deabb6461ae, entries=150, sequenceid=132, filesize=11.8 K 2024-12-12T05:40:06,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4a5e12392cb94711aea8af66049c0e8d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4a5e12392cb94711aea8af66049c0e8d 2024-12-12T05:40:06,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4a5e12392cb94711aea8af66049c0e8d, entries=150, sequenceid=132, filesize=11.8 K 2024-12-12T05:40:06,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/f48ad361d8964a79adb9fd0840990a19 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f48ad361d8964a79adb9fd0840990a19 2024-12-12T05:40:06,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f48ad361d8964a79adb9fd0840990a19, entries=150, sequenceid=132, filesize=11.8 K 2024-12-12T05:40:06,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7b5eb541c096811fd997fc2b7e27d07f in 1274ms, sequenceid=132, compaction requested=false 2024-12-12T05:40:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:06,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=77 2024-12-12T05:40:06,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:06,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:06,491 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:06,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:06,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/310b0063b69746b8b3125c13c26ad355 is 50, key is test_row_0/A:col10/1733982005172/Put/seqid=0 2024-12-12T05:40:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742107_1283 (size=12151) 2024-12-12T05:40:06,899 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/310b0063b69746b8b3125c13c26ad355 2024-12-12T05:40:06,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c5ca8af080aa48e6a5c2bccc1ff01c67 is 50, key is test_row_0/B:col10/1733982005172/Put/seqid=0 2024-12-12T05:40:06,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742108_1284 (size=12151) 2024-12-12T05:40:06,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T05:40:07,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:07,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:07,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982067295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982067295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982067296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982067296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982067297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,313 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c5ca8af080aa48e6a5c2bccc1ff01c67 2024-12-12T05:40:07,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/4fe77eb7e6ae4017bab8f2ae99f93d72 is 50, key is test_row_0/C:col10/1733982005172/Put/seqid=0 2024-12-12T05:40:07,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742109_1285 (size=12151) 2024-12-12T05:40:07,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982067398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982067398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982067398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982067399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982067600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982067600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982067600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982067601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,722 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/4fe77eb7e6ae4017bab8f2ae99f93d72 2024-12-12T05:40:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/310b0063b69746b8b3125c13c26ad355 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/310b0063b69746b8b3125c13c26ad355 2024-12-12T05:40:07,729 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/310b0063b69746b8b3125c13c26ad355, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T05:40:07,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/c5ca8af080aa48e6a5c2bccc1ff01c67 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c5ca8af080aa48e6a5c2bccc1ff01c67 2024-12-12T05:40:07,733 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c5ca8af080aa48e6a5c2bccc1ff01c67, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T05:40:07,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/4fe77eb7e6ae4017bab8f2ae99f93d72 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/4fe77eb7e6ae4017bab8f2ae99f93d72 2024-12-12T05:40:07,737 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/4fe77eb7e6ae4017bab8f2ae99f93d72, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T05:40:07,738 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7b5eb541c096811fd997fc2b7e27d07f in 1247ms, sequenceid=157, compaction requested=true 2024-12-12T05:40:07,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:07,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
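[Editor's note] The pid=77/78 entries that complete just below (and pid=79/80 that start right after) trace a client-requested table flush: HBaseAdmin on the client, FlushTableProcedure on the master, then FlushRegionProcedure/FlushRegionCallable on the region server, with the client-side TableFuture polling the master (the repeated "Checking to see if procedure is done" lines) until "procId: 77 completed". For orientation only, a minimal sketch of issuing such a flush with the standard Admin API follows; it is not the test's own code, and connection configuration is assumed to come from the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure that fans out per-region flush work to the region
      // servers, as traced by the pid=77..80 entries in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}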
2024-12-12T05:40:07,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-12T05:40:07,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-12T05:40:07,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-12T05:40:07,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8600 sec 2024-12-12T05:40:07,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.8630 sec 2024-12-12T05:40:07,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:07,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:40:07,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:07,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:07,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:07,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:07,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:07,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:07,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/d419f3aa52c8443fb371ad044b4b5827 is 50, key is test_row_0/A:col10/1733982007905/Put/seqid=0 2024-12-12T05:40:07,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742110_1286 (size=12151) 2024-12-12T05:40:07,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982067919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982067919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:07,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982067920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982067920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T05:40:07,982 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-12T05:40:07,983 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-12T05:40:07,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:07,984 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:07,984 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:07,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:08,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982068023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982068023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982068024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982068024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:08,135 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:08,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:08,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982068227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982068227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982068227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982068228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:08,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:08,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:08,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/d419f3aa52c8443fb371ad044b4b5827 2024-12-12T05:40:08,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8ba61d1a261947c5a31d6898f9eac5be is 50, key is test_row_0/B:col10/1733982007905/Put/seqid=0 2024-12-12T05:40:08,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742111_1287 (size=12151) 2024-12-12T05:40:08,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:08,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:08,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982068529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982068529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982068531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982068531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:08,593 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:08,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:08,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8ba61d1a261947c5a31d6898f9eac5be 2024-12-12T05:40:08,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/89e5b2503d7c4805b569bf0205376414 is 50, key is test_row_0/C:col10/1733982007905/Put/seqid=0 2024-12-12T05:40:08,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742112_1288 (size=12151) 2024-12-12T05:40:08,745 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:08,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:08,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:08,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:08,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:08,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:08,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:08,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:09,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982069033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982069033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982069035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982069035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,050 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:09,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:09,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:09,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:09,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:09,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:09,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:09,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:09,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/89e5b2503d7c4805b569bf0205376414 2024-12-12T05:40:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/d419f3aa52c8443fb371ad044b4b5827 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d419f3aa52c8443fb371ad044b4b5827 2024-12-12T05:40:09,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d419f3aa52c8443fb371ad044b4b5827, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T05:40:09,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8ba61d1a261947c5a31d6898f9eac5be as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8ba61d1a261947c5a31d6898f9eac5be 2024-12-12T05:40:09,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8ba61d1a261947c5a31d6898f9eac5be, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T05:40:09,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/89e5b2503d7c4805b569bf0205376414 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/89e5b2503d7c4805b569bf0205376414 2024-12-12T05:40:09,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/89e5b2503d7c4805b569bf0205376414, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T05:40:09,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7b5eb541c096811fd997fc2b7e27d07f in 1245ms, sequenceid=171, compaction requested=true 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:09,150 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:09,150 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:09,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:09,151 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:40:09,151 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files) 2024-12-12T05:40:09,151 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
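The "Exploring compaction algorithm has selected 4 files of size 48644 ... 3 permutations with 3 in ratio" entries above come from the store-file selection step before each minor compaction. Below is a minimal sketch of the size-ratio test such a policy applies; the class and method names are illustrative stand-ins (not the actual HBase internals), and the individual byte sizes are assumptions chosen only to sum to the logged 48644 bytes.

import java.util.List;

// Illustrative sketch only: a simplified "in ratio" test for a candidate window of
// store files, in the spirit of an exploring-style compaction policy. Sizes are in
// bytes; a ratio around 1.2 is a common default.
public final class CompactionRatioSketch {

    /** True if every file is no larger than ratio times the sum of the other files. */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true; // a single file is trivially "in ratio"
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false; // one file dominates the window; skip this permutation
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Four flush outputs of roughly 12 K each, like the window picked above.
        // These exact byte values are invented placeholders summing to 48644.
        List<Long> window = List.of(12293L, 12085L, 12151L, 12115L);
        System.out.println(filesInRatio(window, 1.2)); // prints true
    }
}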
2024-12-12T05:40:09,152 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c08f42e3fcfb4630b49c1edf7964d4de, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4a5e12392cb94711aea8af66049c0e8d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c5ca8af080aa48e6a5c2bccc1ff01c67, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8ba61d1a261947c5a31d6898f9eac5be] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=47.5 K 2024-12-12T05:40:09,152 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:40:09,152 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files) 2024-12-12T05:40:09,152 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:09,152 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79efab4ee8524a0ea816806b6bc7c2c1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/848b1dd989224999ac8d2deabb6461ae, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/310b0063b69746b8b3125c13c26ad355, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d419f3aa52c8443fb371ad044b4b5827] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=47.5 K 2024-12-12T05:40:09,152 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c08f42e3fcfb4630b49c1edf7964d4de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733982002925 2024-12-12T05:40:09,152 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79efab4ee8524a0ea816806b6bc7c2c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733982002925 2024-12-12T05:40:09,152 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a5e12392cb94711aea8af66049c0e8d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, 
earliestPutTs=1733982004043 2024-12-12T05:40:09,152 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 848b1dd989224999ac8d2deabb6461ae, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733982004043 2024-12-12T05:40:09,153 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c5ca8af080aa48e6a5c2bccc1ff01c67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733982005167 2024-12-12T05:40:09,153 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 310b0063b69746b8b3125c13c26ad355, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733982005167 2024-12-12T05:40:09,153 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ba61d1a261947c5a31d6898f9eac5be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733982007294 2024-12-12T05:40:09,153 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d419f3aa52c8443fb371ad044b4b5827, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733982007294 2024-12-12T05:40:09,160 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#241 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:09,160 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#240 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:09,161 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/570ef6a7701f4d0d863b63399c40e529 is 50, key is test_row_0/A:col10/1733982007905/Put/seqid=0 2024-12-12T05:40:09,161 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/39b3e0e63c9943f583d27518ee9398b1 is 50, key is test_row_0/B:col10/1733982007905/Put/seqid=0 2024-12-12T05:40:09,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742114_1290 (size=12527) 2024-12-12T05:40:09,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742113_1289 (size=12527) 2024-12-12T05:40:09,202 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T05:40:09,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
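The pid=80 entries above repeat one pattern: the master re-dispatches the flush procedure, the region server finds the region "already flushing", and the callable fails with "Unable to complete flush", which the master records as a remote procedure failure and retries later. A minimal sketch of that shape follows, assuming a simple atomic flag standing in for the region's flush state; this is not the real FlushRegionCallable.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative sketch only: mirrors the behaviour visible in the log. If a flush is
// already running for the region, the callable gives up immediately with an
// IOException, and the dispatcher is expected to retry the procedure later.
public final class FlushCallableSketch implements Callable<Void> {

    private final String regionName;
    private final AtomicBoolean alreadyFlushing; // stand-in for the region's flush state

    FlushCallableSketch(String regionName, AtomicBoolean alreadyFlushing) {
        this.regionName = regionName;
        this.alreadyFlushing = alreadyFlushing;
    }

    @Override
    public Void call() throws IOException {
        if (!alreadyFlushing.compareAndSet(false, true)) {
            // Corresponds to "NOT flushing ... as already flushing" followed by the
            // "Unable to complete flush" IOException reported back to the master.
            throw new IOException("Unable to complete flush " + regionName);
        }
        try {
            // ... snapshot the memstore and write the store files here ...
            return null;
        } finally {
            alreadyFlushing.set(false);
        }
    }
}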
2024-12-12T05:40:09,202 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T05:40:09,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:09,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:09,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:09,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:09,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:09,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:09,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/1baca4f92667460e91b54013fb8306ba is 50, key is test_row_0/A:col10/1733982007913/Put/seqid=0 2024-12-12T05:40:09,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742115_1291 (size=12151) 2024-12-12T05:40:09,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:09,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:09,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982069331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982069434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,570 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/39b3e0e63c9943f583d27518ee9398b1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/39b3e0e63c9943f583d27518ee9398b1 2024-12-12T05:40:09,575 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into 39b3e0e63c9943f583d27518ee9398b1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
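The "Committing .tmp/B/... as .../B/..." entries above show compaction output being written under the region's .tmp directory and then moved into the column-family directory once complete, so readers only ever see finished store files. A minimal sketch of that commit-by-rename step is below, assuming plain Hadoop FileSystem calls and placeholder paths; the real HRegionFileSystem performs additional checks.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: move a finished file from the region's .tmp area into the
// column-family directory. Paths are placeholders, not the test's actual layout.
public final class CommitStoreFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/hbase/data/default/ExampleTable/region/.tmp/B/compacted-file");
        Path storeFile = new Path("/hbase/data/default/ExampleTable/region/B/compacted-file");

        // The file appears under B/ only once it is complete.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}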
2024-12-12T05:40:09,575 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:09,575 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=12, startTime=1733982009150; duration=0sec 2024-12-12T05:40:09,575 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:09,575 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:09,575 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:40:09,576 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:40:09,576 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:09,576 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:09,576 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/98b56d75e3f049ed88120885242850bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f48ad361d8964a79adb9fd0840990a19, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/4fe77eb7e6ae4017bab8f2ae99f93d72, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/89e5b2503d7c4805b569bf0205376414] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=47.5 K 2024-12-12T05:40:09,576 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 98b56d75e3f049ed88120885242850bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733982002925 2024-12-12T05:40:09,577 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f48ad361d8964a79adb9fd0840990a19, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733982004043 2024-12-12T05:40:09,577 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fe77eb7e6ae4017bab8f2ae99f93d72, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=157, earliestPutTs=1733982005167 2024-12-12T05:40:09,577 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 89e5b2503d7c4805b569bf0205376414, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733982007294 2024-12-12T05:40:09,582 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/570ef6a7701f4d0d863b63399c40e529 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/570ef6a7701f4d0d863b63399c40e529 2024-12-12T05:40:09,584 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:09,585 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/e98dfbd145f04347b71b57a73ce8e5d0 is 50, key is test_row_0/C:col10/1733982007905/Put/seqid=0 2024-12-12T05:40:09,588 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into 570ef6a7701f4d0d863b63399c40e529(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
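The PressureAwareThroughputController entries above report each compaction's write rate against a 50.00 MB/second limit; "slept 0 time(s)" simply means these small compactions never reached it. The sketch below is a much-simplified stand-in for that kind of throttling, with the class name, the chunked write loop, and the 1 MB chunk size all being illustrative assumptions rather than the real controller.

// Illustrative sketch only: after each chunk of output, compare the observed average
// rate against a byte-per-second limit and sleep just long enough to fall back under it.
public final class ThroughputLimiterSketch {

    private final double maxBytesPerSecond;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputLimiterSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing a chunk; sleeps if the average rate exceeds the limit. */
    void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double neededSeconds = bytesWritten / maxBytesPerSecond;
        long sleepMillis = (long) ((neededSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
            Thread.sleep(sleepMillis);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            limiter.control(1024 * 1024); // pretend we wrote a 1 MB block of compacted data
        }
        System.out.println("done");
    }
}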
2024-12-12T05:40:09,588 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:09,589 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=12, startTime=1733982009150; duration=0sec 2024-12-12T05:40:09,589 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:09,589 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:09,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742116_1292 (size=12527) 2024-12-12T05:40:09,610 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/1baca4f92667460e91b54013fb8306ba 2024-12-12T05:40:09,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4ed4a9af754640d3ab89686d2ac558f0 is 50, key is test_row_0/B:col10/1733982007913/Put/seqid=0 2024-12-12T05:40:09,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742117_1293 (size=12151) 2024-12-12T05:40:09,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982069637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982069939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:09,995 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/e98dfbd145f04347b71b57a73ce8e5d0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e98dfbd145f04347b71b57a73ce8e5d0 2024-12-12T05:40:09,998 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into e98dfbd145f04347b71b57a73ce8e5d0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
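The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region blocking new writes while its memstore drains through the in-flight flush. Below is a minimal client-side sketch with explicit backoff on that exception; the stock HBase client normally retries such failures itself according to its retry settings, so this loop only makes the backoff visible. The table, row, family, and qualifier follow the test rows seen in the log, and the exception may surface wrapped depending on the client code path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: retry a single Put with exponential backoff when the
// region reports it is over its memstore blocking limit.
public final class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMillis = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    // Memstore is over its blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMillis);
                    backoffMillis *= 2;
                }
            }
        }
    }
}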
2024-12-12T05:40:09,998 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:09,999 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=12, startTime=1733982009150; duration=0sec 2024-12-12T05:40:09,999 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:09,999 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C 2024-12-12T05:40:10,019 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4ed4a9af754640d3ab89686d2ac558f0 2024-12-12T05:40:10,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/bf0051e0f0ff4eef8a0e0967037224d0 is 50, key is test_row_0/C:col10/1733982007913/Put/seqid=0 2024-12-12T05:40:10,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742118_1294 (size=12151) 2024-12-12T05:40:10,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:10,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982070034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:10,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:10,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982070040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:10,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982070041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:10,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:10,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982070045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:10,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:10,429 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/bf0051e0f0ff4eef8a0e0967037224d0 2024-12-12T05:40:10,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982070444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/1baca4f92667460e91b54013fb8306ba as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/1baca4f92667460e91b54013fb8306ba 2024-12-12T05:40:10,477 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/1baca4f92667460e91b54013fb8306ba, entries=150, sequenceid=193, filesize=11.9 K 2024-12-12T05:40:10,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4ed4a9af754640d3ab89686d2ac558f0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4ed4a9af754640d3ab89686d2ac558f0 2024-12-12T05:40:10,482 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4ed4a9af754640d3ab89686d2ac558f0, entries=150, sequenceid=193, filesize=11.9 K 2024-12-12T05:40:10,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/bf0051e0f0ff4eef8a0e0967037224d0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bf0051e0f0ff4eef8a0e0967037224d0 2024-12-12T05:40:10,486 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bf0051e0f0ff4eef8a0e0967037224d0, entries=150, sequenceid=193, filesize=11.9 K 2024-12-12T05:40:10,487 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7b5eb541c096811fd997fc2b7e27d07f in 1285ms, sequenceid=193, compaction requested=false 2024-12-12T05:40:10,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:10,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:10,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-12T05:40:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-12T05:40:10,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-12T05:40:10,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5040 sec 2024-12-12T05:40:10,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.5070 sec 2024-12-12T05:40:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:11,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T05:40:11,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:11,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:11,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:11,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:11,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:11,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:11,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/5f3cb66c741246be983e819bc0df569f is 50, key is test_row_0/A:col10/1733982009328/Put/seqid=0 2024-12-12T05:40:11,459 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742119_1295 (size=12151) 2024-12-12T05:40:11,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982071491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:11,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982071594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:11,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:11,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982071796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:11,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/5f3cb66c741246be983e819bc0df569f 2024-12-12T05:40:11,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/fbcae38606194273a1c64248f0217ce9 is 50, key is test_row_0/B:col10/1733982009328/Put/seqid=0 2024-12-12T05:40:11,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742120_1296 (size=12151) 2024-12-12T05:40:12,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982072044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982072044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,045 DEBUG [Thread-1172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:12,046 DEBUG [Thread-1174 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:12,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982072054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,055 DEBUG [Thread-1170 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:12,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:12,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982072058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,060 DEBUG [Thread-1178 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:12,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T05:40:12,088 INFO [Thread-1180 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-12T05:40:12,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-12T05:40:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T05:40:12,090 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:12,090 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:12,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:12,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:12,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982072097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T05:40:12,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T05:40:12,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:12,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:12,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/fbcae38606194273a1c64248f0217ce9 2024-12-12T05:40:12,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/8b623dca5a7d4cba92bb4e2162ecfa6d is 50, key is test_row_0/C:col10/1733982009328/Put/seqid=0 2024-12-12T05:40:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742121_1297 (size=12151) 2024-12-12T05:40:12,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T05:40:12,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T05:40:12,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:12,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,546 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T05:40:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:12,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:12,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982072602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/8b623dca5a7d4cba92bb4e2162ecfa6d 2024-12-12T05:40:12,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/5f3cb66c741246be983e819bc0df569f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5f3cb66c741246be983e819bc0df569f 2024-12-12T05:40:12,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5f3cb66c741246be983e819bc0df569f, entries=150, sequenceid=211, filesize=11.9 K 2024-12-12T05:40:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/fbcae38606194273a1c64248f0217ce9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/fbcae38606194273a1c64248f0217ce9 2024-12-12T05:40:12,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/fbcae38606194273a1c64248f0217ce9, entries=150, sequenceid=211, filesize=11.9 K 2024-12-12T05:40:12,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T05:40:12,691 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/8b623dca5a7d4cba92bb4e2162ecfa6d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8b623dca5a7d4cba92bb4e2162ecfa6d 2024-12-12T05:40:12,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8b623dca5a7d4cba92bb4e2162ecfa6d, entries=150, sequenceid=211, filesize=11.9 K 2024-12-12T05:40:12,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 7b5eb541c096811fd997fc2b7e27d07f in 1243ms, sequenceid=211, compaction requested=true 2024-12-12T05:40:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:12,696 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:12,696 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:12,696 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:12,696 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files) 
2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files) 2024-12-12T05:40:12,697 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,697 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,697 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/570ef6a7701f4d0d863b63399c40e529, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/1baca4f92667460e91b54013fb8306ba, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5f3cb66c741246be983e819bc0df569f] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.0 K 2024-12-12T05:40:12,697 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/39b3e0e63c9943f583d27518ee9398b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4ed4a9af754640d3ab89686d2ac558f0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/fbcae38606194273a1c64248f0217ce9] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.0 K 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 570ef6a7701f4d0d863b63399c40e529, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733982007294 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 39b3e0e63c9943f583d27518ee9398b1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733982007294 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1baca4f92667460e91b54013fb8306ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733982007913 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ed4a9af754640d3ab89686d2ac558f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733982007913 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
5f3cb66c741246be983e819bc0df569f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733982009328 2024-12-12T05:40:12,697 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting fbcae38606194273a1c64248f0217ce9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733982009328 2024-12-12T05:40:12,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:12,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T05:40:12,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:12,699 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T05:40:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:12,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/be7f1b853e86422e8b4636d1dce3c9c6 is 50, key is test_row_0/A:col10/1733982011490/Put/seqid=0 2024-12-12T05:40:12,705 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:12,705 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:12,705 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/fb897a2872ca45478719a234cfec4849 is 50, key is test_row_0/A:col10/1733982009328/Put/seqid=0 2024-12-12T05:40:12,705 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/01293c94a23e4677a91808899f844188 is 50, key is test_row_0/B:col10/1733982009328/Put/seqid=0 2024-12-12T05:40:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742122_1298 (size=12151) 2024-12-12T05:40:12,710 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/be7f1b853e86422e8b4636d1dce3c9c6 2024-12-12T05:40:12,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742124_1300 (size=12629) 2024-12-12T05:40:12,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742123_1299 (size=12629) 2024-12-12T05:40:12,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4b517a299dcf428b96305ab6cab0c172 is 50, key is test_row_0/B:col10/1733982011490/Put/seqid=0 2024-12-12T05:40:12,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742125_1301 (size=12151) 2024-12-12T05:40:13,127 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/01293c94a23e4677a91808899f844188 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/01293c94a23e4677a91808899f844188 2024-12-12T05:40:13,131 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into 01293c94a23e4677a91808899f844188(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:13,131 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:13,131 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=13, startTime=1733982012696; duration=0sec 2024-12-12T05:40:13,131 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:13,131 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:13,131 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:13,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:13,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:13,132 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:13,132 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e98dfbd145f04347b71b57a73ce8e5d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bf0051e0f0ff4eef8a0e0967037224d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8b623dca5a7d4cba92bb4e2162ecfa6d] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.0 K 2024-12-12T05:40:13,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e98dfbd145f04347b71b57a73ce8e5d0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733982007294 2024-12-12T05:40:13,133 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting bf0051e0f0ff4eef8a0e0967037224d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733982007913 2024-12-12T05:40:13,133 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b623dca5a7d4cba92bb4e2162ecfa6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733982009328 2024-12-12T05:40:13,136 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/fb897a2872ca45478719a234cfec4849 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/fb897a2872ca45478719a234cfec4849 2024-12-12T05:40:13,141 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:13,141 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into fb897a2872ca45478719a234cfec4849(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:13,141 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:13,141 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=13, startTime=1733982012696; duration=0sec 2024-12-12T05:40:13,141 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:13,141 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:13,141 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/8d46c9f598604c5fbf0655ca4851717e is 50, key is test_row_0/C:col10/1733982009328/Put/seqid=0 2024-12-12T05:40:13,144 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4b517a299dcf428b96305ab6cab0c172 2024-12-12T05:40:13,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742126_1302 (size=12629) 2024-12-12T05:40:13,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/a7b1d79b647542aea95871d482e6a751 is 50, key is test_row_0/C:col10/1733982011490/Put/seqid=0 2024-12-12T05:40:13,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742127_1303 (size=12151) 2024-12-12T05:40:13,173 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/a7b1d79b647542aea95871d482e6a751 2024-12-12T05:40:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/be7f1b853e86422e8b4636d1dce3c9c6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/be7f1b853e86422e8b4636d1dce3c9c6 2024-12-12T05:40:13,182 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/be7f1b853e86422e8b4636d1dce3c9c6, entries=150, sequenceid=232, filesize=11.9 K 2024-12-12T05:40:13,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/4b517a299dcf428b96305ab6cab0c172 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4b517a299dcf428b96305ab6cab0c172 2024-12-12T05:40:13,186 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4b517a299dcf428b96305ab6cab0c172, entries=150, sequenceid=232, filesize=11.9 K 2024-12-12T05:40:13,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/a7b1d79b647542aea95871d482e6a751 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/a7b1d79b647542aea95871d482e6a751 2024-12-12T05:40:13,192 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/a7b1d79b647542aea95871d482e6a751, entries=150, sequenceid=232, filesize=11.9 K 2024-12-12T05:40:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T05:40:13,192 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for 7b5eb541c096811fd997fc2b7e27d07f in 494ms, sequenceid=232, compaction requested=false 2024-12-12T05:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-12T05:40:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-12T05:40:13,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-12T05:40:13,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1030 sec 2024-12-12T05:40:13,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.1070 sec 2024-12-12T05:40:13,557 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/8d46c9f598604c5fbf0655ca4851717e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8d46c9f598604c5fbf0655ca4851717e 2024-12-12T05:40:13,561 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into 8d46c9f598604c5fbf0655ca4851717e(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:13,561 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:13,561 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=13, startTime=1733982012696; duration=0sec 2024-12-12T05:40:13,561 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:13,561 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C 2024-12-12T05:40:13,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:13,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:40:13,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:13,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:13,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:13,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:13,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:13,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:13,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/79860654cf334eac9874ef1fe9d6bfed is 50, key is test_row_0/A:col10/1733982013616/Put/seqid=0 2024-12-12T05:40:13,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742128_1304 (size=12151) 2024-12-12T05:40:13,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982073670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:13,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982073773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:13,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982073976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/79860654cf334eac9874ef1fe9d6bfed 2024-12-12T05:40:14,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/560f0d518e9d446fa1f91d68add22519 is 50, key is test_row_0/B:col10/1733982013616/Put/seqid=0 2024-12-12T05:40:14,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742129_1305 (size=12151) 2024-12-12T05:40:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T05:40:14,193 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-12T05:40:14,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:14,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-12T05:40:14,195 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:14,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T05:40:14,195 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:14,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-12T05:40:14,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:14,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982074279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T05:40:14,346 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T05:40:14,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:14,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
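[editor's note] The repeated RegionTooBusyException warnings above are the region refusing writes because its memstore has crossed the blocking threshold while a flush is still draining it. Below is a minimal client-side sketch (not part of this test) of how a caller might back off and retry a put when that exception surfaces; the table name and column come from the log, but the retry limits are illustrative assumptions, and in practice the HBase client already retries internally and may deliver the exception wrapped.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));

      long backoffMs = 100;                       // illustrative starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                         // may fail while the memstore is over its limit
          break;
        } catch (RegionTooBusyException busy) {   // the exception logged above
          if (attempt >= 5) {
            throw busy;                           // give up after a bounded number of tries
          }
          Thread.sleep(backoffMs);                // give MemStoreFlusher time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}
```

Backing off rather than tight-looping gives MemStoreFlusher.0 time to bring the memstore back under the limit, which is what the later "Finished flush of dataSize ~53.67 KB" record shows happening.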
2024-12-12T05:40:14,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/560f0d518e9d446fa1f91d68add22519 2024-12-12T05:40:14,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/80d00d35611f40fd8c4ad11bd9db9140 is 50, key is test_row_0/C:col10/1733982013616/Put/seqid=0 2024-12-12T05:40:14,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742130_1306 (size=12151) 2024-12-12T05:40:14,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T05:40:14,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T05:40:14,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
as already flushing 2024-12-12T05:40:14,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,651 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T05:40:14,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:14,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,651 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:14,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982074783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T05:40:14,803 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T05:40:14,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:14,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
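[editor's note] The pid=84 FlushRegionCallable keeps failing with "Unable to complete flush ... as already flushing" because the MemStoreFlusher-triggered flush is still running, so the master simply re-dispatches the remote procedure until the region can take it. On the client side this whole FlushTableProcedure/FlushRegionProcedure chain is driven by an ordinary admin flush call; a minimal sketch, assuming default configuration and the table name from the log, is:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush-table procedure on the master and blocks until the
      // procedure (and its per-region flush subprocedures) report completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The call blocks until the master reports the procedure done, which corresponds to the "Checking to see if procedure is done pid=83" polling visible above.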
2024-12-12T05:40:14,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:14,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/80d00d35611f40fd8c4ad11bd9db9140 2024-12-12T05:40:14,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/79860654cf334eac9874ef1fe9d6bfed as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79860654cf334eac9874ef1fe9d6bfed 2024-12-12T05:40:14,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79860654cf334eac9874ef1fe9d6bfed, entries=150, sequenceid=246, filesize=11.9 K 2024-12-12T05:40:14,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/560f0d518e9d446fa1f91d68add22519 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/560f0d518e9d446fa1f91d68add22519 2024-12-12T05:40:14,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/560f0d518e9d446fa1f91d68add22519, entries=150, sequenceid=246, filesize=11.9 K 2024-12-12T05:40:14,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/80d00d35611f40fd8c4ad11bd9db9140 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/80d00d35611f40fd8c4ad11bd9db9140 2024-12-12T05:40:14,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/80d00d35611f40fd8c4ad11bd9db9140, entries=150, sequenceid=246, filesize=11.9 K 2024-12-12T05:40:14,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b5eb541c096811fd997fc2b7e27d07f in 1241ms, sequenceid=246, compaction requested=true 2024-12-12T05:40:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:14,864 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:14,865 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:14,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:14,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:14,866 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:14,866 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:14,866 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files) 2024-12-12T05:40:14,866 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files) 2024-12-12T05:40:14,866 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,866 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:14,866 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/fb897a2872ca45478719a234cfec4849, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/be7f1b853e86422e8b4636d1dce3c9c6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79860654cf334eac9874ef1fe9d6bfed] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.1 K 2024-12-12T05:40:14,866 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/01293c94a23e4677a91808899f844188, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4b517a299dcf428b96305ab6cab0c172, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/560f0d518e9d446fa1f91d68add22519] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.1 K 2024-12-12T05:40:14,867 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb897a2872ca45478719a234cfec4849, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733982009328 2024-12-12T05:40:14,867 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 01293c94a23e4677a91808899f844188, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733982009328 2024-12-12T05:40:14,867 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b517a299dcf428b96305ab6cab0c172, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733982011481 2024-12-12T05:40:14,867 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting be7f1b853e86422e8b4636d1dce3c9c6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733982011481 2024-12-12T05:40:14,867 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 560f0d518e9d446fa1f91d68add22519, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733982013612 2024-12-12T05:40:14,867 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79860654cf334eac9874ef1fe9d6bfed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733982013612 2024-12-12T05:40:14,873 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:14,874 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/18bb387d118c455fb58588936df915a7 is 50, key is test_row_0/B:col10/1733982013616/Put/seqid=0 2024-12-12T05:40:14,885 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:14,886 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/5a893b61faad4f83b8fcf989de9f6926 is 50, key is test_row_0/A:col10/1733982013616/Put/seqid=0 2024-12-12T05:40:14,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742131_1307 (size=12731) 2024-12-12T05:40:14,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742132_1308 (size=12731) 2024-12-12T05:40:14,903 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/5a893b61faad4f83b8fcf989de9f6926 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5a893b61faad4f83b8fcf989de9f6926 2024-12-12T05:40:14,908 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into 5a893b61faad4f83b8fcf989de9f6926(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
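[editor's note] "Exploring compaction algorithm has selected 3 files of size 36931" means the policy chose all three eligible HFiles in the store for a minor compaction. The core ratio test behind that choice can be illustrated with the simplified sketch below; this shows the idea only, it is not the real ExploringCompactionPolicy, and the file sizes are rounded from the log.

```java
import java.util.List;

// Simplified illustration of the ratio test used when picking store files for a
// minor compaction; NOT the actual HBase implementation, just the idea.
public class CompactionRatioSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // A file may not exceed `ratio` times the combined size of the other
      // candidates, so one large file is not endlessly rewritten together
      // with a few small ones.
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three store-file sizes from the log (~12.3 K + 11.9 K + 11.9 K ≈ 36931 bytes).
    List<Long> sizes = List.of(12629L, 12151L, 12151L);
    System.out.println(filesInRatio(sizes, 1.2)); // 1.2 is the default hbase.hstore.compaction.ratio
  }
}
```

With three roughly equal ~12 K files the set passes easily; a single much larger file in the mix would fail the check and be left out of the selection.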
2024-12-12T05:40:14,908 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:14,908 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=13, startTime=1733982014864; duration=0sec 2024-12-12T05:40:14,908 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:14,908 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:14,908 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:14,910 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:14,910 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:14,910 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:14,910 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8d46c9f598604c5fbf0655ca4851717e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/a7b1d79b647542aea95871d482e6a751, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/80d00d35611f40fd8c4ad11bd9db9140] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.1 K 2024-12-12T05:40:14,910 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d46c9f598604c5fbf0655ca4851717e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733982009328 2024-12-12T05:40:14,911 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7b1d79b647542aea95871d482e6a751, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733982011481 2024-12-12T05:40:14,911 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80d00d35611f40fd8c4ad11bd9db9140, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733982013612 2024-12-12T05:40:14,918 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:14,919 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/3977893b7d5b4b95a144c7c3e2fe8f9c is 50, key is test_row_0/C:col10/1733982013616/Put/seqid=0 2024-12-12T05:40:14,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742133_1309 (size=12731) 2024-12-12T05:40:14,942 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/3977893b7d5b4b95a144c7c3e2fe8f9c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3977893b7d5b4b95a144c7c3e2fe8f9c 2024-12-12T05:40:14,952 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into 3977893b7d5b4b95a144c7c3e2fe8f9c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:14,952 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:14,952 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=13, startTime=1733982014865; duration=0sec 2024-12-12T05:40:14,952 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:14,952 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C 2024-12-12T05:40:14,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:14,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:14,956 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:14,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:14,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/ab4caafb9e964c778b3b001bb4a8ca60 is 50, key is test_row_0/A:col10/1733982013669/Put/seqid=0 2024-12-12T05:40:14,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742134_1310 (size=12301) 2024-12-12T05:40:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T05:40:15,297 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/18bb387d118c455fb58588936df915a7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/18bb387d118c455fb58588936df915a7 2024-12-12T05:40:15,301 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into 18bb387d118c455fb58588936df915a7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
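[editor's note] The "Over memstore limit=512.0 K" figure in the RegionTooBusyException warnings throughout this stretch is the per-region blocking threshold, which, as far as HRegion.checkResources is concerned, comes from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently runs with a deliberately tiny flush size. The snippet below is a hypothetical configuration sketch showing how the two settings combine, not the test's actual values.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical, test-style values: a 512 K blocking limit is consistent with
    // flush.size * block.multiplier, e.g. 128 K * 4. A production cluster would
    // normally keep the 128 MB default flush size.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block above ~" + (flushSize * multiplier / 1024) + " K per region");
  }
}
```

Raising either value trades fewer blocked writes for larger flushes; keeping them small, as here, forces the blocking path that the test is exercising.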
2024-12-12T05:40:15,301 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:15,301 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=13, startTime=1733982014864; duration=0sec 2024-12-12T05:40:15,301 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:15,301 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:15,369 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/ab4caafb9e964c778b3b001bb4a8ca60 2024-12-12T05:40:15,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/e774c7b0d77e4603be36cd44e7391ab9 is 50, key is test_row_0/B:col10/1733982013669/Put/seqid=0 2024-12-12T05:40:15,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742135_1311 (size=12301) 2024-12-12T05:40:15,806 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/e774c7b0d77e4603be36cd44e7391ab9 2024-12-12T05:40:15,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:15,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:15,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/19b245bc5dec4d258a2cfaa733b635e6 is 50, key is test_row_0/C:col10/1733982013669/Put/seqid=0 2024-12-12T05:40:15,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:15,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982075820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:15,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742136_1312 (size=12301) 2024-12-12T05:40:15,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:15,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982075922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35328 deadline: 1733982076064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,066 DEBUG [Thread-1172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:16,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35310 deadline: 1733982076075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,076 DEBUG [Thread-1178 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:16,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35316 deadline: 1733982076081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,082 DEBUG [Thread-1174 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:16,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35326 deadline: 1733982076093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,094 DEBUG [Thread-1170 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8175 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:16,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982076124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,238 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/19b245bc5dec4d258a2cfaa733b635e6 2024-12-12T05:40:16,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/ab4caafb9e964c778b3b001bb4a8ca60 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/ab4caafb9e964c778b3b001bb4a8ca60 2024-12-12T05:40:16,244 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/ab4caafb9e964c778b3b001bb4a8ca60, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T05:40:16,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/e774c7b0d77e4603be36cd44e7391ab9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e774c7b0d77e4603be36cd44e7391ab9 2024-12-12T05:40:16,248 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e774c7b0d77e4603be36cd44e7391ab9, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T05:40:16,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/19b245bc5dec4d258a2cfaa733b635e6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/19b245bc5dec4d258a2cfaa733b635e6 2024-12-12T05:40:16,252 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/19b245bc5dec4d258a2cfaa733b635e6, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T05:40:16,253 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7b5eb541c096811fd997fc2b7e27d07f in 1297ms, sequenceid=273, compaction requested=false 2024-12-12T05:40:16,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:16,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:16,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-12T05:40:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-12T05:40:16,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-12T05:40:16,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0590 sec 2024-12-12T05:40:16,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.0610 sec 2024-12-12T05:40:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T05:40:16,298 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-12T05:40:16,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:16,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-12T05:40:16,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:16,300 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:16,301 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:16,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:16,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:16,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:40:16,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:16,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:16,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, 
store=B 2024-12-12T05:40:16,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:16,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:16,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:16,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4af87928982e4767b539a08e5460d142 is 50, key is test_row_0/A:col10/1733982015819/Put/seqid=0 2024-12-12T05:40:16,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742137_1313 (size=14741) 2024-12-12T05:40:16,452 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:16,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:16,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:16,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982076489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982076591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:16,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:16,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:16,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:16,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,756 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:16,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:16,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:16,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982076794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4af87928982e4767b539a08e5460d142 2024-12-12T05:40:16,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/241f9945495c4e7db61e4ae96dc81e07 is 50, key is test_row_0/B:col10/1733982015819/Put/seqid=0 2024-12-12T05:40:16,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742138_1314 (size=12301) 2024-12-12T05:40:16,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:16,908 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:16,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:16,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:16,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:16,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:16,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:16,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T05:40:17,061 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:17,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982077096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,212 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:17,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:17,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:17,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/241f9945495c4e7db61e4ae96dc81e07 2024-12-12T05:40:17,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/22aac9476ee84e0a98c110ac3e7177ce is 50, key is test_row_0/C:col10/1733982015819/Put/seqid=0 2024-12-12T05:40:17,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742139_1315 (size=12301) 2024-12-12T05:40:17,364 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:17,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:17,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,365 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:17,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:17,516 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:17,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:17,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:17,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982077600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/22aac9476ee84e0a98c110ac3e7177ce 2024-12-12T05:40:17,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4af87928982e4767b539a08e5460d142 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4af87928982e4767b539a08e5460d142 2024-12-12T05:40:17,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4af87928982e4767b539a08e5460d142, entries=200, sequenceid=286, filesize=14.4 K 2024-12-12T05:40:17,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/241f9945495c4e7db61e4ae96dc81e07 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/241f9945495c4e7db61e4ae96dc81e07 2024-12-12T05:40:17,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/241f9945495c4e7db61e4ae96dc81e07, entries=150, sequenceid=286, filesize=12.0 K 2024-12-12T05:40:17,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/22aac9476ee84e0a98c110ac3e7177ce as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/22aac9476ee84e0a98c110ac3e7177ce 2024-12-12T05:40:17,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:17,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:17,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/22aac9476ee84e0a98c110ac3e7177ce, entries=150, sequenceid=286, filesize=12.0 K 2024-12-12T05:40:17,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7b5eb541c096811fd997fc2b7e27d07f in 1245ms, sequenceid=286, compaction requested=true 2024-12-12T05:40:17,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:17,673 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:17,673 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:17,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:17,673 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:17,673 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:17,673 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files) 2024-12-12T05:40:17,673 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files) 2024-12-12T05:40:17,674 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:17,674 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,674 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/18bb387d118c455fb58588936df915a7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e774c7b0d77e4603be36cd44e7391ab9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/241f9945495c4e7db61e4ae96dc81e07] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.5 K 2024-12-12T05:40:17,674 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5a893b61faad4f83b8fcf989de9f6926, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/ab4caafb9e964c778b3b001bb4a8ca60, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4af87928982e4767b539a08e5460d142] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=38.8 K 2024-12-12T05:40:17,674 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 18bb387d118c455fb58588936df915a7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733982013612 2024-12-12T05:40:17,674 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e774c7b0d77e4603be36cd44e7391ab9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733982013662 2024-12-12T05:40:17,674 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a893b61faad4f83b8fcf989de9f6926, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733982013612 2024-12-12T05:40:17,674 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 241f9945495c4e7db61e4ae96dc81e07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733982015812 2024-12-12T05:40:17,675 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab4caafb9e964c778b3b001bb4a8ca60, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733982013662 2024-12-12T05:40:17,675 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4af87928982e4767b539a08e5460d142, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733982015812 
2024-12-12T05:40:17,681 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:17,681 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#268 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:17,681 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/0d1a239b49a6408fb3a5266aeeb86998 is 50, key is test_row_0/B:col10/1733982015819/Put/seqid=0 2024-12-12T05:40:17,681 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/931c7dd2b26c4a17a6a2187dfc4ad8f2 is 50, key is test_row_0/A:col10/1733982015819/Put/seqid=0 2024-12-12T05:40:17,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742141_1317 (size=12983) 2024-12-12T05:40:17,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742140_1316 (size=12983) 2024-12-12T05:40:17,694 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/931c7dd2b26c4a17a6a2187dfc4ad8f2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/931c7dd2b26c4a17a6a2187dfc4ad8f2 2024-12-12T05:40:17,697 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into 931c7dd2b26c4a17a6a2187dfc4ad8f2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:17,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:17,697 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=13, startTime=1733982017672; duration=0sec 2024-12-12T05:40:17,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:17,698 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:17,698 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:17,698 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:17,698 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:17,699 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,699 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3977893b7d5b4b95a144c7c3e2fe8f9c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/19b245bc5dec4d258a2cfaa733b635e6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/22aac9476ee84e0a98c110ac3e7177ce] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.5 K 2024-12-12T05:40:17,699 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3977893b7d5b4b95a144c7c3e2fe8f9c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733982013612 2024-12-12T05:40:17,699 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19b245bc5dec4d258a2cfaa733b635e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733982013662 2024-12-12T05:40:17,699 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22aac9476ee84e0a98c110ac3e7177ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733982015812 2024-12-12T05:40:17,705 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:17,706 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b5506ec319a14eeda6afb1c7b7e2b6a5 is 50, key is test_row_0/C:col10/1733982015819/Put/seqid=0 2024-12-12T05:40:17,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742142_1318 (size=12983) 2024-12-12T05:40:17,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:17,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T05:40:17,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:17,821 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:40:17,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:17,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:17,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:17,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:17,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:17,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:17,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/16330d60f28c4deaa6c2e3721baadb40 is 50, key is test_row_0/A:col10/1733982016480/Put/seqid=0 2024-12-12T05:40:17,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742143_1319 
(size=12301) 2024-12-12T05:40:18,089 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/0d1a239b49a6408fb3a5266aeeb86998 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0d1a239b49a6408fb3a5266aeeb86998 2024-12-12T05:40:18,093 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into 0d1a239b49a6408fb3a5266aeeb86998(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:18,093 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:18,093 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=13, startTime=1733982017673; duration=0sec 2024-12-12T05:40:18,093 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:18,093 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:18,113 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/b5506ec319a14eeda6afb1c7b7e2b6a5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b5506ec319a14eeda6afb1c7b7e2b6a5 2024-12-12T05:40:18,117 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into b5506ec319a14eeda6afb1c7b7e2b6a5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:18,117 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:18,117 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=13, startTime=1733982017673; duration=0sec 2024-12-12T05:40:18,117 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:18,117 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C 2024-12-12T05:40:18,228 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/16330d60f28c4deaa6c2e3721baadb40 2024-12-12T05:40:18,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/e616da1bb06649299f5b4129b3c0edec is 50, key is test_row_0/B:col10/1733982016480/Put/seqid=0 2024-12-12T05:40:18,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742144_1320 (size=12301) 2024-12-12T05:40:18,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:18,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. as already flushing 2024-12-12T05:40:18,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:18,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982078622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:18,649 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/e616da1bb06649299f5b4129b3c0edec 2024-12-12T05:40:18,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/823873033abe4fb6911b82a363b6aab7 is 50, key is test_row_0/C:col10/1733982016480/Put/seqid=0 2024-12-12T05:40:18,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742145_1321 (size=12301) 2024-12-12T05:40:18,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:18,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982078724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:18,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982078927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:19,060 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/823873033abe4fb6911b82a363b6aab7 2024-12-12T05:40:19,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/16330d60f28c4deaa6c2e3721baadb40 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/16330d60f28c4deaa6c2e3721baadb40 2024-12-12T05:40:19,067 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/16330d60f28c4deaa6c2e3721baadb40, entries=150, sequenceid=311, filesize=12.0 K 2024-12-12T05:40:19,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/e616da1bb06649299f5b4129b3c0edec as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e616da1bb06649299f5b4129b3c0edec 2024-12-12T05:40:19,071 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e616da1bb06649299f5b4129b3c0edec, entries=150, sequenceid=311, filesize=12.0 K 2024-12-12T05:40:19,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/823873033abe4fb6911b82a363b6aab7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/823873033abe4fb6911b82a363b6aab7 2024-12-12T05:40:19,075 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/823873033abe4fb6911b82a363b6aab7, entries=150, sequenceid=311, filesize=12.0 K 2024-12-12T05:40:19,076 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7b5eb541c096811fd997fc2b7e27d07f in 1255ms, sequenceid=311, compaction requested=false 2024-12-12T05:40:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-12T05:40:19,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-12T05:40:19,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-12T05:40:19,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7760 sec 2024-12-12T05:40:19,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.7800 sec 2024-12-12T05:40:19,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:19,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:40:19,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:19,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:19,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:19,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:19,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:19,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:19,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/13a78b24991744b5a56bc841191c61ec is 50, key is test_row_0/A:col10/1733982018612/Put/seqid=0 2024-12-12T05:40:19,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742146_1322 (size=14741) 2024-12-12T05:40:19,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982079276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:19,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:19,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982079380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:19,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:19,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982079583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:19,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/13a78b24991744b5a56bc841191c61ec 2024-12-12T05:40:19,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/ab6ca056a2f94dd59015a31232baf8ea is 50, key is test_row_0/B:col10/1733982018612/Put/seqid=0 2024-12-12T05:40:19,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742147_1323 (size=12301) 2024-12-12T05:40:19,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:19,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982079885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:20,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/ab6ca056a2f94dd59015a31232baf8ea 2024-12-12T05:40:20,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/c721cf52adbc43ca95012b4258e7c82a is 50, key is test_row_0/C:col10/1733982018612/Put/seqid=0 2024-12-12T05:40:20,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742148_1324 (size=12301) 2024-12-12T05:40:20,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:20,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35334 deadline: 1733982080387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:20,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T05:40:20,404 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-12T05:40:20,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:20,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-12T05:40:20,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T05:40:20,406 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:20,406 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:20,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:20,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/c721cf52adbc43ca95012b4258e7c82a 2024-12-12T05:40:20,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/13a78b24991744b5a56bc841191c61ec as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/13a78b24991744b5a56bc841191c61ec 2024-12-12T05:40:20,465 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/13a78b24991744b5a56bc841191c61ec, entries=200, sequenceid=326, filesize=14.4 K 2024-12-12T05:40:20,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/ab6ca056a2f94dd59015a31232baf8ea as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/ab6ca056a2f94dd59015a31232baf8ea 2024-12-12T05:40:20,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/ab6ca056a2f94dd59015a31232baf8ea, entries=150, sequenceid=326, filesize=12.0 K 2024-12-12T05:40:20,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/c721cf52adbc43ca95012b4258e7c82a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/c721cf52adbc43ca95012b4258e7c82a 2024-12-12T05:40:20,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/c721cf52adbc43ca95012b4258e7c82a, entries=150, sequenceid=326, filesize=12.0 K 2024-12-12T05:40:20,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7b5eb541c096811fd997fc2b7e27d07f in 1242ms, sequenceid=326, compaction requested=true 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b5eb541c096811fd997fc2b7e27d07f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:20,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:20,473 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:20,473 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/B is initiating minor compaction (all files) 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/A is initiating minor compaction (all files) 2024-12-12T05:40:20,474 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/B in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:20,474 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/A in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:20,474 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0d1a239b49a6408fb3a5266aeeb86998, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e616da1bb06649299f5b4129b3c0edec, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/ab6ca056a2f94dd59015a31232baf8ea] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.7 K 2024-12-12T05:40:20,474 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/931c7dd2b26c4a17a6a2187dfc4ad8f2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/16330d60f28c4deaa6c2e3721baadb40, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/13a78b24991744b5a56bc841191c61ec] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=39.1 K 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d1a239b49a6408fb3a5266aeeb86998, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733982015812 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 931c7dd2b26c4a17a6a2187dfc4ad8f2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733982015812 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e616da1bb06649299f5b4129b3c0edec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733982016466 2024-12-12T05:40:20,474 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16330d60f28c4deaa6c2e3721baadb40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733982016466 2024-12-12T05:40:20,475 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ab6ca056a2f94dd59015a31232baf8ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733982018612 2024-12-12T05:40:20,475 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13a78b24991744b5a56bc841191c61ec, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733982018612 2024-12-12T05:40:20,480 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#A#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:20,480 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#B#compaction#277 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:20,480 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/42720aacecb6400da0fae372411c86f4 is 50, key is test_row_0/A:col10/1733982018612/Put/seqid=0 2024-12-12T05:40:20,480 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/0e25d7c5ab664e08acc0c3b1718d5708 is 50, key is test_row_0/B:col10/1733982018612/Put/seqid=0 2024-12-12T05:40:20,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742150_1326 (size=13085) 2024-12-12T05:40:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T05:40:20,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742149_1325 (size=13085) 2024-12-12T05:40:20,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:20,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:20,558 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:20,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4093c224f1d84719b8b085ac18429adb is 50, key is test_row_0/A:col10/1733982019270/Put/seqid=0 2024-12-12T05:40:20,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742151_1327 (size=12301) 2024-12-12T05:40:20,566 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4093c224f1d84719b8b085ac18429adb 2024-12-12T05:40:20,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8187fae2039c4638958de5f96b17b695 is 50, key is test_row_0/B:col10/1733982019270/Put/seqid=0 2024-12-12T05:40:20,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742152_1328 (size=12301) 2024-12-12T05:40:20,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T05:40:21,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T05:40:21,056 INFO 
[RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8187fae2039c4638958de5f96b17b695 2024-12-12T05:40:21,059 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/0e25d7c5ab664e08acc0c3b1718d5708 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e25d7c5ab664e08acc0c3b1718d5708 2024-12-12T05:40:21,060 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/42720aacecb6400da0fae372411c86f4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/42720aacecb6400da0fae372411c86f4 2024-12-12T05:40:21,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/72aedd45564a4b9dbdeba014b6b2c999 is 50, key is test_row_0/C:col10/1733982019270/Put/seqid=0 2024-12-12T05:40:21,066 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/A of 7b5eb541c096811fd997fc2b7e27d07f into 42720aacecb6400da0fae372411c86f4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:21,066 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:21,066 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/A, priority=13, startTime=1733982020473; duration=0sec 2024-12-12T05:40:21,066 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:21,066 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:A 2024-12-12T05:40:21,066 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:21,067 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/B of 7b5eb541c096811fd997fc2b7e27d07f into 0e25d7c5ab664e08acc0c3b1718d5708(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:21,067 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:21,067 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/B, priority=13, startTime=1733982020473; duration=0sec 2024-12-12T05:40:21,067 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:21,067 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:B 2024-12-12T05:40:21,069 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:21,069 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7b5eb541c096811fd997fc2b7e27d07f/C is initiating minor compaction (all files) 2024-12-12T05:40:21,069 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b5eb541c096811fd997fc2b7e27d07f/C in TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:21,069 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b5506ec319a14eeda6afb1c7b7e2b6a5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/823873033abe4fb6911b82a363b6aab7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/c721cf52adbc43ca95012b4258e7c82a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp, totalSize=36.7 K 2024-12-12T05:40:21,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742153_1329 (size=12301) 2024-12-12T05:40:21,070 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5506ec319a14eeda6afb1c7b7e2b6a5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733982015812 2024-12-12T05:40:21,070 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 823873033abe4fb6911b82a363b6aab7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733982016466 2024-12-12T05:40:21,070 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c721cf52adbc43ca95012b4258e7c82a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733982018612 2024-12-12T05:40:21,076 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b5eb541c096811fd997fc2b7e27d07f#C#compaction#281 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:21,076 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/04e93ab2460540ce9a1c370db63d8da2 is 50, key is test_row_0/C:col10/1733982018612/Put/seqid=0 2024-12-12T05:40:21,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742154_1330 (size=13085) 2024-12-12T05:40:21,183 DEBUG [Thread-1189 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f7f772a to 127.0.0.1:60303 2024-12-12T05:40:21,183 DEBUG [Thread-1185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b9e2976 to 127.0.0.1:60303 2024-12-12T05:40:21,184 DEBUG [Thread-1189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:21,184 DEBUG [Thread-1185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:21,184 DEBUG [Thread-1187 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56e9a678 to 127.0.0.1:60303 2024-12-12T05:40:21,184 DEBUG [Thread-1187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:21,184 DEBUG [Thread-1181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:60303 2024-12-12T05:40:21,184 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:21,185 DEBUG [Thread-1183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328852db to 127.0.0.1:60303 2024-12-12T05:40:21,185 DEBUG [Thread-1183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:21,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:21,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
as already flushing 2024-12-12T05:40:21,402 DEBUG [Thread-1176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:60303 2024-12-12T05:40:21,402 DEBUG [Thread-1176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:21,472 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/72aedd45564a4b9dbdeba014b6b2c999 2024-12-12T05:40:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/4093c224f1d84719b8b085ac18429adb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4093c224f1d84719b8b085ac18429adb 2024-12-12T05:40:21,485 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/04e93ab2460540ce9a1c370db63d8da2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/04e93ab2460540ce9a1c370db63d8da2 2024-12-12T05:40:21,486 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4093c224f1d84719b8b085ac18429adb, entries=150, sequenceid=349, filesize=12.0 K 2024-12-12T05:40:21,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/8187fae2039c4638958de5f96b17b695 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8187fae2039c4638958de5f96b17b695 2024-12-12T05:40:21,489 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b5eb541c096811fd997fc2b7e27d07f/C of 7b5eb541c096811fd997fc2b7e27d07f into 04e93ab2460540ce9a1c370db63d8da2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:21,489 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:21,489 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f., storeName=7b5eb541c096811fd997fc2b7e27d07f/C, priority=13, startTime=1733982020473; duration=0sec 2024-12-12T05:40:21,489 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:21,489 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b5eb541c096811fd997fc2b7e27d07f:C 2024-12-12T05:40:21,490 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8187fae2039c4638958de5f96b17b695, entries=150, sequenceid=349, filesize=12.0 K 2024-12-12T05:40:21,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/72aedd45564a4b9dbdeba014b6b2c999 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/72aedd45564a4b9dbdeba014b6b2c999 2024-12-12T05:40:21,493 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/72aedd45564a4b9dbdeba014b6b2c999, entries=150, sequenceid=349, filesize=12.0 K 2024-12-12T05:40:21,494 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=6.71 KB/6870 for 7b5eb541c096811fd997fc2b7e27d07f in 936ms, sequenceid=349, compaction requested=false 2024-12-12T05:40:21,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:21,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:21,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-12T05:40:21,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-12T05:40:21,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-12T05:40:21,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0890 sec 2024-12-12T05:40:21,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.0910 sec 2024-12-12T05:40:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T05:40:21,508 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-12T05:40:26,182 DEBUG [Thread-1172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:60303 2024-12-12T05:40:26,182 DEBUG [Thread-1178 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:60303 2024-12-12T05:40:26,182 DEBUG [Thread-1172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:26,182 DEBUG [Thread-1178 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:26,182 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:60303 2024-12-12T05:40:26,182 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:26,184 DEBUG [Thread-1170 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:60303 2024-12-12T05:40:26,184 DEBUG [Thread-1170 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 149 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8969 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8781 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8617 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9031 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8793 2024-12-12T05:40:26,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T05:40:26,185 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:40:26,185 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:60303 2024-12-12T05:40:26,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:26,186 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T05:40:26,186 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T05:40:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T05:40:26,189 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982026189"}]},"ts":"1733982026189"} 2024-12-12T05:40:26,190 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T05:40:26,227 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T05:40:26,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:40:26,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, UNASSIGN}] 2024-12-12T05:40:26,229 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, UNASSIGN 2024-12-12T05:40:26,230 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=91 updating hbase:meta row=7b5eb541c096811fd997fc2b7e27d07f, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:26,231 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:40:26,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure 7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:26,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T05:40:26,383 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:26,384 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:26,384 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:40:26,384 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing 7b5eb541c096811fd997fc2b7e27d07f, disabling compactions & flushes 2024-12-12T05:40:26,384 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:26,384 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:26,385 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. after waiting 0 ms 2024-12-12T05:40:26,385 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 
2024-12-12T05:40:26,385 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing 7b5eb541c096811fd997fc2b7e27d07f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T05:40:26,385 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=A 2024-12-12T05:40:26,386 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:26,386 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=B 2024-12-12T05:40:26,386 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:26,386 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b5eb541c096811fd997fc2b7e27d07f, store=C 2024-12-12T05:40:26,386 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:26,395 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/6f154fde3d3c480ea9e72c215dbbc54c is 50, key is test_row_1/A:col10/1733982026110/Put/seqid=0 2024-12-12T05:40:26,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742155_1331 (size=9857) 2024-12-12T05:40:26,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T05:40:26,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T05:40:26,800 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/6f154fde3d3c480ea9e72c215dbbc54c 2024-12-12T05:40:26,814 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/9ceb145738014c0cbdfbca51a821b62e is 50, key is test_row_1/B:col10/1733982026110/Put/seqid=0 2024-12-12T05:40:26,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742156_1332 (size=9857) 2024-12-12T05:40:27,220 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 
{event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/9ceb145738014c0cbdfbca51a821b62e 2024-12-12T05:40:27,235 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/0123a69dc6d0456db1aa392dced59fd8 is 50, key is test_row_1/C:col10/1733982026110/Put/seqid=0 2024-12-12T05:40:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742157_1333 (size=9857) 2024-12-12T05:40:27,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T05:40:27,640 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/0123a69dc6d0456db1aa392dced59fd8 2024-12-12T05:40:27,651 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/A/6f154fde3d3c480ea9e72c215dbbc54c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/6f154fde3d3c480ea9e72c215dbbc54c 2024-12-12T05:40:27,654 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/6f154fde3d3c480ea9e72c215dbbc54c, entries=100, sequenceid=360, filesize=9.6 K 2024-12-12T05:40:27,655 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/B/9ceb145738014c0cbdfbca51a821b62e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/9ceb145738014c0cbdfbca51a821b62e 2024-12-12T05:40:27,657 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/9ceb145738014c0cbdfbca51a821b62e, entries=100, sequenceid=360, filesize=9.6 K 2024-12-12T05:40:27,658 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/.tmp/C/0123a69dc6d0456db1aa392dced59fd8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0123a69dc6d0456db1aa392dced59fd8 2024-12-12T05:40:27,660 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0123a69dc6d0456db1aa392dced59fd8, entries=100, sequenceid=360, filesize=9.6 K 2024-12-12T05:40:27,661 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 7b5eb541c096811fd997fc2b7e27d07f in 1276ms, sequenceid=360, compaction requested=true 2024-12-12T05:40:27,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/0e885bcb1b9047ddb992db34c013d139, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c1e889c5d91d4a71a5d29a8edbc7fb69, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/f499c03d28aa4f06827e8819002d1c70, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d20b45abc55647cabfc63a1abae2aff4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/b9b5b8517a7b4635a7d72c6366956aee, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4fba4c77a3734f3ebb1b90bb9cce4074, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c297fc4f20da4b51a5ae8b7cb5a663c3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79efab4ee8524a0ea816806b6bc7c2c1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/848b1dd989224999ac8d2deabb6461ae, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/310b0063b69746b8b3125c13c26ad355, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/570ef6a7701f4d0d863b63399c40e529, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d419f3aa52c8443fb371ad044b4b5827, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/1baca4f92667460e91b54013fb8306ba, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/fb897a2872ca45478719a234cfec4849, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5f3cb66c741246be983e819bc0df569f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/be7f1b853e86422e8b4636d1dce3c9c6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5a893b61faad4f83b8fcf989de9f6926, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79860654cf334eac9874ef1fe9d6bfed, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/ab4caafb9e964c778b3b001bb4a8ca60, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4af87928982e4767b539a08e5460d142, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/931c7dd2b26c4a17a6a2187dfc4ad8f2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/16330d60f28c4deaa6c2e3721baadb40, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/13a78b24991744b5a56bc841191c61ec] to archive 2024-12-12T05:40:27,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:27,664 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c1e889c5d91d4a71a5d29a8edbc7fb69 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c1e889c5d91d4a71a5d29a8edbc7fb69 2024-12-12T05:40:27,664 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/0e885bcb1b9047ddb992db34c013d139 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/0e885bcb1b9047ddb992db34c013d139 2024-12-12T05:40:27,664 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/f499c03d28aa4f06827e8819002d1c70 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/f499c03d28aa4f06827e8819002d1c70 2024-12-12T05:40:27,664 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d20b45abc55647cabfc63a1abae2aff4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d20b45abc55647cabfc63a1abae2aff4 2024-12-12T05:40:27,664 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/b9b5b8517a7b4635a7d72c6366956aee to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/b9b5b8517a7b4635a7d72c6366956aee 2024-12-12T05:40:27,664 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4fba4c77a3734f3ebb1b90bb9cce4074 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4fba4c77a3734f3ebb1b90bb9cce4074 2024-12-12T05:40:27,664 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79efab4ee8524a0ea816806b6bc7c2c1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79efab4ee8524a0ea816806b6bc7c2c1 2024-12-12T05:40:27,665 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c297fc4f20da4b51a5ae8b7cb5a663c3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/c297fc4f20da4b51a5ae8b7cb5a663c3 2024-12-12T05:40:27,665 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/848b1dd989224999ac8d2deabb6461ae to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/848b1dd989224999ac8d2deabb6461ae 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/310b0063b69746b8b3125c13c26ad355 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/310b0063b69746b8b3125c13c26ad355 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/570ef6a7701f4d0d863b63399c40e529 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/570ef6a7701f4d0d863b63399c40e529 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d419f3aa52c8443fb371ad044b4b5827 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/d419f3aa52c8443fb371ad044b4b5827 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5f3cb66c741246be983e819bc0df569f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5f3cb66c741246be983e819bc0df569f 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/1baca4f92667460e91b54013fb8306ba to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/1baca4f92667460e91b54013fb8306ba 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/fb897a2872ca45478719a234cfec4849 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/fb897a2872ca45478719a234cfec4849 2024-12-12T05:40:27,666 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/be7f1b853e86422e8b4636d1dce3c9c6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/be7f1b853e86422e8b4636d1dce3c9c6 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5a893b61faad4f83b8fcf989de9f6926 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/5a893b61faad4f83b8fcf989de9f6926 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79860654cf334eac9874ef1fe9d6bfed to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/79860654cf334eac9874ef1fe9d6bfed 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/ab4caafb9e964c778b3b001bb4a8ca60 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/ab4caafb9e964c778b3b001bb4a8ca60 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4af87928982e4767b539a08e5460d142 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4af87928982e4767b539a08e5460d142 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/16330d60f28c4deaa6c2e3721baadb40 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/16330d60f28c4deaa6c2e3721baadb40 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/931c7dd2b26c4a17a6a2187dfc4ad8f2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/931c7dd2b26c4a17a6a2187dfc4ad8f2 2024-12-12T05:40:27,667 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/13a78b24991744b5a56bc841191c61ec to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/13a78b24991744b5a56bc841191c61ec 2024-12-12T05:40:27,668 DEBUG [StoreCloser-TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c16432ff81c04e69afe0d4a3798bd1da, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8498cf857dd249e285377f5559421ff9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e05a93d04034f5db25c7bfe859cc3b9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/3c5d21f7b72c4d07badbf816b76e7af8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c7cee429460c4eb5a2ed20a03bee8f22, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/f2d30aac4f1a497d93d86d5f221c5da4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c08f42e3fcfb4630b49c1edf7964d4de, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/11285ba0475a4ab298779786f2205387, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4a5e12392cb94711aea8af66049c0e8d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c5ca8af080aa48e6a5c2bccc1ff01c67, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/39b3e0e63c9943f583d27518ee9398b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8ba61d1a261947c5a31d6898f9eac5be, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4ed4a9af754640d3ab89686d2ac558f0, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/01293c94a23e4677a91808899f844188, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/fbcae38606194273a1c64248f0217ce9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4b517a299dcf428b96305ab6cab0c172, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/18bb387d118c455fb58588936df915a7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/560f0d518e9d446fa1f91d68add22519, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e774c7b0d77e4603be36cd44e7391ab9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0d1a239b49a6408fb3a5266aeeb86998, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/241f9945495c4e7db61e4ae96dc81e07, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e616da1bb06649299f5b4129b3c0edec, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/ab6ca056a2f94dd59015a31232baf8ea] to archive 2024-12-12T05:40:27,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:27,671 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/3c5d21f7b72c4d07badbf816b76e7af8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/3c5d21f7b72c4d07badbf816b76e7af8 2024-12-12T05:40:27,671 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8498cf857dd249e285377f5559421ff9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8498cf857dd249e285377f5559421ff9 2024-12-12T05:40:27,671 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c16432ff81c04e69afe0d4a3798bd1da to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c16432ff81c04e69afe0d4a3798bd1da 2024-12-12T05:40:27,671 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e05a93d04034f5db25c7bfe859cc3b9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e05a93d04034f5db25c7bfe859cc3b9 2024-12-12T05:40:27,671 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/f2d30aac4f1a497d93d86d5f221c5da4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/f2d30aac4f1a497d93d86d5f221c5da4 2024-12-12T05:40:27,671 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c7cee429460c4eb5a2ed20a03bee8f22 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c7cee429460c4eb5a2ed20a03bee8f22 2024-12-12T05:40:27,671 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/11285ba0475a4ab298779786f2205387 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/11285ba0475a4ab298779786f2205387 2024-12-12T05:40:27,672 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c08f42e3fcfb4630b49c1edf7964d4de to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c08f42e3fcfb4630b49c1edf7964d4de 2024-12-12T05:40:27,672 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/39b3e0e63c9943f583d27518ee9398b1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/39b3e0e63c9943f583d27518ee9398b1 2024-12-12T05:40:27,672 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4a5e12392cb94711aea8af66049c0e8d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4a5e12392cb94711aea8af66049c0e8d 2024-12-12T05:40:27,672 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4ed4a9af754640d3ab89686d2ac558f0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4ed4a9af754640d3ab89686d2ac558f0 2024-12-12T05:40:27,672 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/01293c94a23e4677a91808899f844188 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/01293c94a23e4677a91808899f844188 2024-12-12T05:40:27,673 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c5ca8af080aa48e6a5c2bccc1ff01c67 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/c5ca8af080aa48e6a5c2bccc1ff01c67 2024-12-12T05:40:27,673 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/fbcae38606194273a1c64248f0217ce9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/fbcae38606194273a1c64248f0217ce9 2024-12-12T05:40:27,673 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8ba61d1a261947c5a31d6898f9eac5be to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8ba61d1a261947c5a31d6898f9eac5be 2024-12-12T05:40:27,673 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4b517a299dcf428b96305ab6cab0c172 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/4b517a299dcf428b96305ab6cab0c172 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/18bb387d118c455fb58588936df915a7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/18bb387d118c455fb58588936df915a7 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/560f0d518e9d446fa1f91d68add22519 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/560f0d518e9d446fa1f91d68add22519 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e774c7b0d77e4603be36cd44e7391ab9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e774c7b0d77e4603be36cd44e7391ab9 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/241f9945495c4e7db61e4ae96dc81e07 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/241f9945495c4e7db61e4ae96dc81e07 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0d1a239b49a6408fb3a5266aeeb86998 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0d1a239b49a6408fb3a5266aeeb86998 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e616da1bb06649299f5b4129b3c0edec to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/e616da1bb06649299f5b4129b3c0edec 2024-12-12T05:40:27,674 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/ab6ca056a2f94dd59015a31232baf8ea to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/ab6ca056a2f94dd59015a31232baf8ea 2024-12-12T05:40:27,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e42b0c386ce24ae99437d76611fff896, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b0356f9fe30447b0a19ff7f1156ce9aa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f61154f9f1a2468ca138373d718e6baa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3137cdc9907c4511991f83c39108b981, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0a955b1ebc664a8b9311c292233aeba6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b3463fd0a0c94c3da7fd062f1de9e726, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/98b56d75e3f049ed88120885242850bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bcc2db790fc14a16af140100bb9c14f3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f48ad361d8964a79adb9fd0840990a19, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/4fe77eb7e6ae4017bab8f2ae99f93d72, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e98dfbd145f04347b71b57a73ce8e5d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/89e5b2503d7c4805b569bf0205376414, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bf0051e0f0ff4eef8a0e0967037224d0, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8d46c9f598604c5fbf0655ca4851717e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8b623dca5a7d4cba92bb4e2162ecfa6d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/a7b1d79b647542aea95871d482e6a751, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3977893b7d5b4b95a144c7c3e2fe8f9c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/80d00d35611f40fd8c4ad11bd9db9140, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/19b245bc5dec4d258a2cfaa733b635e6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b5506ec319a14eeda6afb1c7b7e2b6a5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/22aac9476ee84e0a98c110ac3e7177ce, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/823873033abe4fb6911b82a363b6aab7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/c721cf52adbc43ca95012b4258e7c82a] to archive 2024-12-12T05:40:27,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:27,678 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f61154f9f1a2468ca138373d718e6baa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f61154f9f1a2468ca138373d718e6baa 2024-12-12T05:40:27,678 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b0356f9fe30447b0a19ff7f1156ce9aa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b0356f9fe30447b0a19ff7f1156ce9aa 2024-12-12T05:40:27,678 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e42b0c386ce24ae99437d76611fff896 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e42b0c386ce24ae99437d76611fff896 2024-12-12T05:40:27,678 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b3463fd0a0c94c3da7fd062f1de9e726 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b3463fd0a0c94c3da7fd062f1de9e726 2024-12-12T05:40:27,678 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3137cdc9907c4511991f83c39108b981 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3137cdc9907c4511991f83c39108b981 2024-12-12T05:40:27,679 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0a955b1ebc664a8b9311c292233aeba6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0a955b1ebc664a8b9311c292233aeba6 2024-12-12T05:40:27,679 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/98b56d75e3f049ed88120885242850bb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/98b56d75e3f049ed88120885242850bb 2024-12-12T05:40:27,679 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bcc2db790fc14a16af140100bb9c14f3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bcc2db790fc14a16af140100bb9c14f3 2024-12-12T05:40:27,679 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f48ad361d8964a79adb9fd0840990a19 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/f48ad361d8964a79adb9fd0840990a19 2024-12-12T05:40:27,680 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/4fe77eb7e6ae4017bab8f2ae99f93d72 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/4fe77eb7e6ae4017bab8f2ae99f93d72 2024-12-12T05:40:27,680 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e98dfbd145f04347b71b57a73ce8e5d0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/e98dfbd145f04347b71b57a73ce8e5d0 2024-12-12T05:40:27,680 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/89e5b2503d7c4805b569bf0205376414 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/89e5b2503d7c4805b569bf0205376414 2024-12-12T05:40:27,680 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bf0051e0f0ff4eef8a0e0967037224d0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/bf0051e0f0ff4eef8a0e0967037224d0 2024-12-12T05:40:27,680 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8b623dca5a7d4cba92bb4e2162ecfa6d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8b623dca5a7d4cba92bb4e2162ecfa6d 2024-12-12T05:40:27,681 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8d46c9f598604c5fbf0655ca4851717e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/8d46c9f598604c5fbf0655ca4851717e 2024-12-12T05:40:27,681 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/a7b1d79b647542aea95871d482e6a751 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/a7b1d79b647542aea95871d482e6a751 2024-12-12T05:40:27,681 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/80d00d35611f40fd8c4ad11bd9db9140 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/80d00d35611f40fd8c4ad11bd9db9140 2024-12-12T05:40:27,681 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b5506ec319a14eeda6afb1c7b7e2b6a5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/b5506ec319a14eeda6afb1c7b7e2b6a5 2024-12-12T05:40:27,681 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/19b245bc5dec4d258a2cfaa733b635e6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/19b245bc5dec4d258a2cfaa733b635e6 2024-12-12T05:40:27,682 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/22aac9476ee84e0a98c110ac3e7177ce to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/22aac9476ee84e0a98c110ac3e7177ce 2024-12-12T05:40:27,682 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/823873033abe4fb6911b82a363b6aab7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/823873033abe4fb6911b82a363b6aab7 2024-12-12T05:40:27,682 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3977893b7d5b4b95a144c7c3e2fe8f9c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/3977893b7d5b4b95a144c7c3e2fe8f9c 2024-12-12T05:40:27,682 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/c721cf52adbc43ca95012b4258e7c82a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/c721cf52adbc43ca95012b4258e7c82a 2024-12-12T05:40:27,686 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/recovered.edits/363.seqid, newMaxSeqId=363, maxSeqId=1 2024-12-12T05:40:27,686 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f. 2024-12-12T05:40:27,686 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for 7b5eb541c096811fd997fc2b7e27d07f: 2024-12-12T05:40:27,687 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed 7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:27,688 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=7b5eb541c096811fd997fc2b7e27d07f, regionState=CLOSED 2024-12-12T05:40:27,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-12T05:40:27,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure 7b5eb541c096811fd997fc2b7e27d07f, server=83e80bf221ca,46457,1733981928566 in 1.4570 sec 2024-12-12T05:40:27,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-12T05:40:27,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b5eb541c096811fd997fc2b7e27d07f, UNASSIGN in 1.4600 sec 2024-12-12T05:40:27,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-12T05:40:27,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4630 sec 2024-12-12T05:40:27,692 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982027692"}]},"ts":"1733982027692"} 2024-12-12T05:40:27,693 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T05:40:27,735 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-12T05:40:27,737 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5500 sec 2024-12-12T05:40:28,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T05:40:28,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T05:40:28,298 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-12T05:40:28,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T05:40:28,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,303 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-12T05:40:28,304 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,307 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:28,312 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/recovered.edits] 2024-12-12T05:40:28,317 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4093c224f1d84719b8b085ac18429adb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/4093c224f1d84719b8b085ac18429adb 2024-12-12T05:40:28,317 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/6f154fde3d3c480ea9e72c215dbbc54c to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/6f154fde3d3c480ea9e72c215dbbc54c 2024-12-12T05:40:28,317 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/42720aacecb6400da0fae372411c86f4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/A/42720aacecb6400da0fae372411c86f4 2024-12-12T05:40:28,319 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e25d7c5ab664e08acc0c3b1718d5708 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/0e25d7c5ab664e08acc0c3b1718d5708 2024-12-12T05:40:28,319 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8187fae2039c4638958de5f96b17b695 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/8187fae2039c4638958de5f96b17b695 2024-12-12T05:40:28,319 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/9ceb145738014c0cbdfbca51a821b62e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/B/9ceb145738014c0cbdfbca51a821b62e 2024-12-12T05:40:28,322 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/04e93ab2460540ce9a1c370db63d8da2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/04e93ab2460540ce9a1c370db63d8da2 2024-12-12T05:40:28,322 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0123a69dc6d0456db1aa392dced59fd8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/0123a69dc6d0456db1aa392dced59fd8 2024-12-12T05:40:28,322 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/72aedd45564a4b9dbdeba014b6b2c999 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/C/72aedd45564a4b9dbdeba014b6b2c999 2024-12-12T05:40:28,324 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/recovered.edits/363.seqid to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f/recovered.edits/363.seqid 2024-12-12T05:40:28,325 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7b5eb541c096811fd997fc2b7e27d07f 2024-12-12T05:40:28,325 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T05:40:28,326 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,329 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T05:40:28,330 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T05:40:28,331 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,331 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T05:40:28,331 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733982028331"}]},"ts":"9223372036854775807"} 2024-12-12T05:40:28,333 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T05:40:28,333 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7b5eb541c096811fd997fc2b7e27d07f, NAME => 'TestAcidGuarantees,,1733981998917.7b5eb541c096811fd997fc2b7e27d07f.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T05:40:28,333 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-12T05:40:28,333 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733982028333"}]},"ts":"9223372036854775807"} 2024-12-12T05:40:28,335 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T05:40:28,377 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 77 msec 2024-12-12T05:40:28,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-12T05:40:28,405 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-12T05:40:28,419 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=245 (was 244) - Thread LEAK? -, OpenFileDescriptor=450 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=290 (was 268) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=13280 (was 13318) 2024-12-12T05:40:28,427 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=290, ProcessCount=11, AvailableMemoryMB=13280 2024-12-12T05:40:28,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T05:40:28,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:40:28,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:28,430 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:40:28,430 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:28,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-12-12T05:40:28,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T05:40:28,431 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:40:28,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742158_1334 (size=963) 2024-12-12T05:40:28,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T05:40:28,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T05:40:28,842 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:40:28,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742159_1335 (size=53) 2024-12-12T05:40:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T05:40:29,253 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:40:29,253 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 28540e98a53f5d1213a72e3944e7527f, disabling compactions & flushes 2024-12-12T05:40:29,254 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:29,254 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:29,254 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. after waiting 0 ms 2024-12-12T05:40:29,254 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:29,254 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:29,254 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:29,256 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:40:29,257 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733982029256"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733982029256"}]},"ts":"1733982029256"} 2024-12-12T05:40:29,259 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:40:29,260 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:40:29,260 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982029260"}]},"ts":"1733982029260"} 2024-12-12T05:40:29,262 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T05:40:29,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, ASSIGN}] 2024-12-12T05:40:29,311 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, ASSIGN 2024-12-12T05:40:29,312 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:40:29,463 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:29,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:29,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T05:40:29,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:29,626 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:29,626 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:40:29,626 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,627 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:40:29,627 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,627 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,629 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,631 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:29,631 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28540e98a53f5d1213a72e3944e7527f columnFamilyName A 2024-12-12T05:40:29,632 DEBUG [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:29,632 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(327): Store=28540e98a53f5d1213a72e3944e7527f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:29,633 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,634 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:29,634 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28540e98a53f5d1213a72e3944e7527f columnFamilyName B 2024-12-12T05:40:29,634 DEBUG [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:29,634 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(327): Store=28540e98a53f5d1213a72e3944e7527f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:29,635 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,635 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:29,636 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28540e98a53f5d1213a72e3944e7527f columnFamilyName C 2024-12-12T05:40:29,636 DEBUG [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:29,636 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(327): Store=28540e98a53f5d1213a72e3944e7527f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:29,636 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:29,637 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,637 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,638 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:40:29,640 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:29,641 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:40:29,642 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened 28540e98a53f5d1213a72e3944e7527f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66418801, jitterRate=-0.010282739996910095}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:40:29,642 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:29,643 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., pid=96, masterSystemTime=1733982029620 2024-12-12T05:40:29,644 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:29,644 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
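[Editor's note] The entries above trace CreateTableProcedure pid=94 creating TestAcidGuarantees with column families A, B and C (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks) and opening its single region 28540e98a53f5d1213a72e3944e7527f on 83e80bf221ca,46457. For orientation, a minimal client-side sketch of an equivalent create against the HBase 2.x Admin API follows; the class name, connection setup and configuration source are illustrative assumptions and are not taken from the logged test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // One column family per letter, mirroring the descriptor in the log (one version kept).
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build());
      }
      admin.createTable(table.build());                  // blocks until CreateTableProcedure finishes
    }
  }
}

The synchronous createTable call corresponds to the "Operation: CREATE ... procId: 94 completed" entry logged once the assign subprocedures finish.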
2024-12-12T05:40:29,645 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:29,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-12T05:40:29,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 in 179 msec 2024-12-12T05:40:29,648 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-12T05:40:29,648 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, ASSIGN in 337 msec 2024-12-12T05:40:29,648 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:40:29,648 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982029648"}]},"ts":"1733982029648"} 2024-12-12T05:40:29,649 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T05:40:29,661 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:40:29,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2320 sec 2024-12-12T05:40:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T05:40:30,541 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-12T05:40:30,545 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-12-12T05:40:30,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:30,590 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:30,592 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:30,593 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:40:30,594 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42912, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:40:30,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T05:40:30,597 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:40:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:30,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742160_1336 (size=999) 2024-12-12T05:40:31,012 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T05:40:31,012 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T05:40:31,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:40:31,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, REOPEN/MOVE}] 2024-12-12T05:40:31,022 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, REOPEN/MOVE 2024-12-12T05:40:31,023 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,024 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:40:31,024 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:31,176 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,176 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,177 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:40:31,177 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing 28540e98a53f5d1213a72e3944e7527f, disabling compactions & flushes 2024-12-12T05:40:31,177 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,177 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,177 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. after waiting 0 ms 2024-12-12T05:40:31,177 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:31,208 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T05:40:31,210 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,210 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:31,210 WARN [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: 28540e98a53f5d1213a72e3944e7527f to self. 2024-12-12T05:40:31,213 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,214 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=CLOSED 2024-12-12T05:40:31,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-12T05:40:31,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 in 191 msec 2024-12-12T05:40:31,218 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, REOPEN/MOVE; state=CLOSED, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=true 2024-12-12T05:40:31,369 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:31,525 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,529 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:31,529 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:40:31,529 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,530 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:40:31,530 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,530 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,531 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,533 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:31,533 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28540e98a53f5d1213a72e3944e7527f columnFamilyName A 2024-12-12T05:40:31,535 DEBUG [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:31,535 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(327): Store=28540e98a53f5d1213a72e3944e7527f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:31,536 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,537 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:31,537 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28540e98a53f5d1213a72e3944e7527f columnFamilyName B 2024-12-12T05:40:31,537 DEBUG [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:31,538 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(327): Store=28540e98a53f5d1213a72e3944e7527f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:31,538 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,539 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:31,539 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28540e98a53f5d1213a72e3944e7527f columnFamilyName C 2024-12-12T05:40:31,539 DEBUG [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:31,540 INFO [StoreOpener-28540e98a53f5d1213a72e3944e7527f-1 {}] regionserver.HStore(327): Store=28540e98a53f5d1213a72e3944e7527f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:31,540 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,541 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,543 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,546 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:40:31,548 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,549 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened 28540e98a53f5d1213a72e3944e7527f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61067697, jitterRate=-0.09002040326595306}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:40:31,550 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:31,551 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., pid=101, masterSystemTime=1733982031525 2024-12-12T05:40:31,552 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,552 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
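[Editor's note] The modify logged at 05:40:30,597 changes only column family A, adding IS_MOB => 'true' and MOB_THRESHOLD => '4'; ReopenTableRegionsProcedure pid=98 then closes and reopens the region so the new descriptor takes effect, which is what the open just completed above reflects (openSeqNum moves from 2 to 5). A hedged sketch of issuing that same descriptor change through the HBase 2.x Admin API is given below; the wrapper class, method name and Admin handle are illustrative assumptions, not code from this test run.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  // 'admin' is assumed to come from an open Connection, as in the earlier sketch.
  static void enableMob(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    // Rebuild family 'A' with MOB enabled and a 4-byte threshold, leaving B and C untouched.
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(
            ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)
                .setMobThreshold(4L)
                .build())
        .build();
    // Runs as ModifyTableProcedure on the master and triggers the region reopen seen in the log.
    admin.modifyTable(modified);
  }
}

The new descriptor only applies once the region is reopened, which is why the procedure framework schedules the REOPEN/MOVE transition recorded above before ModifyTableProcedure pid=97 is marked finished.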
2024-12-12T05:40:31,553 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=OPEN, openSeqNum=5, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-12T05:40:31,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 in 182 msec 2024-12-12T05:40:31,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-12T05:40:31,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, REOPEN/MOVE in 534 msec 2024-12-12T05:40:31,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-12T05:40:31,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 539 msec 2024-12-12T05:40:31,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 962 msec 2024-12-12T05:40:31,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-12T05:40:31,562 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f472e0 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-12-12T05:40:31,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,661 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-12-12T05:40:31,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-12-12T05:40:31,686 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,687 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 
127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-12-12T05:40:31,694 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-12-12T05:40:31,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,703 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-12-12T05:40:31,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,711 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x787e5169 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7284f16d 2024-12-12T05:40:31,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47679076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,720 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x627cad17 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37a637ac 2024-12-12T05:40:31,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cb9e50e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,728 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39387e4d to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fa53591 2024-12-12T05:40:31,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb726fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,737 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x238db126 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3512017b 2024-12-12T05:40:31,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@301741f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:31,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:31,749 DEBUG [hconnection-0x4f28ff20-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,749 DEBUG [hconnection-0x60fd0ab0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,749 DEBUG [hconnection-0x58c3be44-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-12T05:40:31,750 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,750 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,750 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,750 DEBUG [hconnection-0x232b5828-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,751 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:31,751 DEBUG [hconnection-0x79850ff0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:31,752 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:31,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:31,752 DEBUG [hconnection-0x57428fec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-12T05:40:31,752 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,752 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,753 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,755 DEBUG [hconnection-0x668b1085-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,756 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44524, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,757 DEBUG [hconnection-0x256ec45f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,758 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,759 DEBUG [hconnection-0x7bcb83be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,760 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:31,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:40:31,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:31,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:31,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:31,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:31,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:31,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:31,767 DEBUG [hconnection-0x2813abac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:31,769 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:31,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982091771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982091772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982091772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982091773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982091774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124f1589e93f41460698482ee9f37f6f4f_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982031760/Put/seqid=0 2024-12-12T05:40:31,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742161_1337 (size=12154) 2024-12-12T05:40:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:31,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982091875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982091875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982091875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982091875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982091875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:31,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T05:40:31,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:31,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:31,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:31,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:32,056 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T05:40:32,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:32,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:32,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:32,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:32,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:32,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:32,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982092077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982092077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982092077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982092078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982092078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,208 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:32,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T05:40:32,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:32,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:32,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:32,209 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:32,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:32,211 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124f1589e93f41460698482ee9f37f6f4f_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124f1589e93f41460698482ee9f37f6f4f_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:32,212 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/41ead117df4246d7831fc859db2c99a4, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:32,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/41ead117df4246d7831fc859db2c99a4 is 175, key is test_row_0/A:col10/1733982031760/Put/seqid=0 2024-12-12T05:40:32,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742162_1338 (size=30955) 2024-12-12T05:40:32,223 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/41ead117df4246d7831fc859db2c99a4 2024-12-12T05:40:32,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/43efc29309b949598dda677f14cea6bb is 50, key is test_row_0/B:col10/1733982031760/Put/seqid=0 2024-12-12T05:40:32,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742163_1339 (size=12001) 2024-12-12T05:40:32,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/43efc29309b949598dda677f14cea6bb 2024-12-12T05:40:32,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/1050a2343c7b47d6acfa51ad9f7c39d3 is 50, key is test_row_0/C:col10/1733982031760/Put/seqid=0 2024-12-12T05:40:32,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742164_1340 (size=12001) 2024-12-12T05:40:32,291 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/1050a2343c7b47d6acfa51ad9f7c39d3 2024-12-12T05:40:32,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/41ead117df4246d7831fc859db2c99a4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4 2024-12-12T05:40:32,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4, entries=150, sequenceid=18, filesize=30.2 K 2024-12-12T05:40:32,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/43efc29309b949598dda677f14cea6bb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/43efc29309b949598dda677f14cea6bb 2024-12-12T05:40:32,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/43efc29309b949598dda677f14cea6bb, entries=150, sequenceid=18, filesize=11.7 K 2024-12-12T05:40:32,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/1050a2343c7b47d6acfa51ad9f7c39d3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/1050a2343c7b47d6acfa51ad9f7c39d3 2024-12-12T05:40:32,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/1050a2343c7b47d6acfa51ad9f7c39d3, entries=150, sequenceid=18, filesize=11.7 K 2024-12-12T05:40:32,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 28540e98a53f5d1213a72e3944e7527f in 553ms, sequenceid=18, compaction requested=false 2024-12-12T05:40:32,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:32,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:32,361 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-12T05:40:32,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:32,361 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:40:32,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:32,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:32,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:32,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:32,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:32,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:32,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212723df5f454db45d09ed86d97a811e8ec_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982031773/Put/seqid=0 2024-12-12T05:40:32,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742165_1341 (size=12154) 2024-12-12T05:40:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:32,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:32,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982092387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982092388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982092388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982092389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982092390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982092492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982092492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982092492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982092494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982092494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982092695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982092695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982092695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982092699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:32,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982092700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:32,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:32,775 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212723df5f454db45d09ed86d97a811e8ec_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212723df5f454db45d09ed86d97a811e8ec_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:32,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/8ee55d90f0f44dfa9fda1cb5b13304ee, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:32,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/8ee55d90f0f44dfa9fda1cb5b13304ee is 175, key is test_row_0/A:col10/1733982031773/Put/seqid=0 2024-12-12T05:40:32,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742166_1342 (size=30955) 2024-12-12T05:40:32,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:33,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982093000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982093000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982093000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982093004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982093005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,180 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/8ee55d90f0f44dfa9fda1cb5b13304ee 2024-12-12T05:40:33,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/711a419f5bca44649a94f1d8453a5d39 is 50, key is test_row_0/B:col10/1733982031773/Put/seqid=0 2024-12-12T05:40:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742167_1343 (size=12001) 2024-12-12T05:40:33,215 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T05:40:33,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982093502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982093504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982093505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982093509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982093509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:33,594 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/711a419f5bca44649a94f1d8453a5d39 2024-12-12T05:40:33,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/677658c166824a6e94e516ca4926a79e is 50, key is test_row_0/C:col10/1733982031773/Put/seqid=0 2024-12-12T05:40:33,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742168_1344 (size=12001) 2024-12-12T05:40:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:34,010 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/677658c166824a6e94e516ca4926a79e 2024-12-12T05:40:34,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/8ee55d90f0f44dfa9fda1cb5b13304ee as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee 2024-12-12T05:40:34,016 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee, entries=150, sequenceid=41, filesize=30.2 K 2024-12-12T05:40:34,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/711a419f5bca44649a94f1d8453a5d39 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/711a419f5bca44649a94f1d8453a5d39 2024-12-12T05:40:34,020 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/711a419f5bca44649a94f1d8453a5d39, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T05:40:34,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/677658c166824a6e94e516ca4926a79e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/677658c166824a6e94e516ca4926a79e 2024-12-12T05:40:34,023 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/677658c166824a6e94e516ca4926a79e, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T05:40:34,024 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 28540e98a53f5d1213a72e3944e7527f in 1663ms, sequenceid=41, compaction requested=false 2024-12-12T05:40:34,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:34,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:34,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-12T05:40:34,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-12T05:40:34,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-12T05:40:34,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2730 sec 2024-12-12T05:40:34,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.2780 sec 2024-12-12T05:40:34,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:34,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:40:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:34,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:34,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cc2ed61ec73a4a39b5d9c1b2fe5c5e90_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:34,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742169_1345 (size=14594) 2024-12-12T05:40:34,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982094538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982094538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982094539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982094539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982094544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982094647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982094647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982094647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982094647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982094651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982094851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982094851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982094851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982094851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:34,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982094856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:34,920 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:34,923 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cc2ed61ec73a4a39b5d9c1b2fe5c5e90_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cc2ed61ec73a4a39b5d9c1b2fe5c5e90_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:34,923 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/e0ce7d27bc504b64b04a7b0a6ef1516a, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:34,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/e0ce7d27bc504b64b04a7b0a6ef1516a is 175, key is test_row_0/A:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:34,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742170_1346 (size=39549) 2024-12-12T05:40:35,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982095154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982095155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982095155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982095156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982095162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,327 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/e0ce7d27bc504b64b04a7b0a6ef1516a 2024-12-12T05:40:35,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/c026212de3f348638edd41a64bdc0d8b is 50, key is test_row_0/B:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:35,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742171_1347 (size=12001) 2024-12-12T05:40:35,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982095658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982095659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982095661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982095662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:35,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982095666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:35,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/c026212de3f348638edd41a64bdc0d8b 2024-12-12T05:40:35,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/7e20e3637ca54ab180c85e4d759a5e3f is 50, key is test_row_0/C:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:35,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742172_1348 (size=12001) 2024-12-12T05:40:35,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-12T05:40:35,855 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-12T05:40:35,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-12T05:40:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 
2024-12-12T05:40:35,857 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:35,858 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:35,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T05:40:36,009 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T05:40:36,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:36,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:36,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:36,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
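Aside: the dozens of near-identical RegionTooBusyException traces above all come from HRegion.checkResources rejecting Mutate calls while the region's memstore sits over its 512.0 K blocking limit (the memstore flush size times the block multiplier) and the flush started by MemStoreFlusher.0 has not yet drained it. Below is only a rough client-side sketch of coping with that response; the class name, attempt count and backoff values are assumptions and are not taken from this test, while the table, row, family and qualifier names are the ones visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same shape of write the test issues: row test_row_0, family A, qualifier col10.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                    // assumed starting pause, not from the test
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                  // RSRpcServices.mutate path seen in the traces above
                    break;
                } catch (RegionTooBusyException busy) {
                    // The region is still above its blocking memstore size
                    // (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier).
                    // Depending on hbase.client.retries.number the client may retry internally
                    // first and only surface this once its own retries are exhausted.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                  // simple exponential backoff
                }
            }
        }
    }
}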
2024-12-12T05:40:36,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:36,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:36,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/7e20e3637ca54ab180c85e4d759a5e3f 2024-12-12T05:40:36,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/e0ce7d27bc504b64b04a7b0a6ef1516a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a 2024-12-12T05:40:36,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a, entries=200, sequenceid=55, filesize=38.6 K 2024-12-12T05:40:36,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/c026212de3f348638edd41a64bdc0d8b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/c026212de3f348638edd41a64bdc0d8b 2024-12-12T05:40:36,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/c026212de3f348638edd41a64bdc0d8b, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T05:40:36,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/7e20e3637ca54ab180c85e4d759a5e3f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7e20e3637ca54ab180c85e4d759a5e3f 2024-12-12T05:40:36,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7e20e3637ca54ab180c85e4d759a5e3f, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T05:40:36,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 28540e98a53f5d1213a72e3944e7527f in 1648ms, sequenceid=55, compaction requested=true 2024-12-12T05:40:36,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:36,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=104 2024-12-12T05:40:36,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:36,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:36,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:36,159 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:36,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:36,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:36,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T05:40:36,159 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:36,159 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:36,159 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/A is initiating minor compaction (all files) 2024-12-12T05:40:36,159 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/A in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
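Aside: just above, ExploringCompactionPolicy reports that it selected 3 files of size 101459 for the A store after considering 1 permutation with 1 in ratio. Below is a minimal sketch of that in-ratio test, under the assumption that it mirrors the policy's rule that no candidate file may be larger than the combined size of the other candidates times the compaction ratio (1.2 by default); the helper and the standalone class are illustrative rather than the actual HBase source.

public final class FilesInRatio {
    // Returns true when no single store file is larger than `ratio` times the
    // combined size of the other files in the candidate set.
    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three A-family files selected above: two ~30.2 K files plus
        // the 38.6 K flush output e0ce7d27bc504b64b04a7b0a6ef1516a, 101459 bytes in total.
        System.out.println(filesInRatio(new long[] {30955L, 30955L, 39549L}, 1.2)); // true
    }
}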
2024-12-12T05:40:36,160 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=99.1 K 2024-12-12T05:40:36,160 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a] 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41ead117df4246d7831fc859db2c99a4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733982031756 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/B is initiating minor compaction (all files) 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ee55d90f0f44dfa9fda1cb5b13304ee, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733982031771 2024-12-12T05:40:36,160 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/B in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:36,160 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/43efc29309b949598dda677f14cea6bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/711a419f5bca44649a94f1d8453a5d39, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/c026212de3f348638edd41a64bdc0d8b] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.2 K 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0ce7d27bc504b64b04a7b0a6ef1516a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982032386 2024-12-12T05:40:36,160 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 43efc29309b949598dda677f14cea6bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733982031756 2024-12-12T05:40:36,161 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 711a419f5bca44649a94f1d8453a5d39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733982031771 2024-12-12T05:40:36,161 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c026212de3f348638edd41a64bdc0d8b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982032389 2024-12-12T05:40:36,161 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
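Aside: the FLUSH procedures being polled in this stretch of the log (procId 102 completed, pid=104 with its FlushRegionCallable subprocedure pid=105) are driven by a client-side admin flush of the table. A minimal sketch of that call, assuming a plain ConnectionFactory setup; the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks until the master-side flush procedure for the table finishes;
            // the master log above polls the same thing with
            // "Checking to see if procedure is done pid=104".
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}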
2024-12-12T05:40:36,162 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:36,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:36,166 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:36,167 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#B#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:36,168 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/214a56f00b334dd7bdf3829ced0193fa is 50, key is test_row_0/B:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:36,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121267f437459fb04f37a4e848aa034c46af_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982034538/Put/seqid=0 2024-12-12T05:40:36,172 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412127fb8f441e8cd4f70bf277399bbb1c9ad_28540e98a53f5d1213a72e3944e7527f store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:36,173 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412127fb8f441e8cd4f70bf277399bbb1c9ad_28540e98a53f5d1213a72e3944e7527f, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:36,173 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127fb8f441e8cd4f70bf277399bbb1c9ad_28540e98a53f5d1213a72e3944e7527f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:36,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742173_1349 (size=12104) 2024-12-12T05:40:36,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742174_1350 (size=12154) 2024-12-12T05:40:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742175_1351 (size=4469) 2024-12-12T05:40:36,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T05:40:36,586 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/214a56f00b334dd7bdf3829ced0193fa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/214a56f00b334dd7bdf3829ced0193fa 2024-12-12T05:40:36,589 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/B of 28540e98a53f5d1213a72e3944e7527f into 214a56f00b334dd7bdf3829ced0193fa(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:36,589 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:36,589 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/B, priority=13, startTime=1733982036158; duration=0sec 2024-12-12T05:40:36,589 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:36,589 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:B 2024-12-12T05:40:36,589 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:36,590 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:36,590 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/C is initiating minor compaction (all files) 2024-12-12T05:40:36,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:36,590 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/C in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:36,590 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/1050a2343c7b47d6acfa51ad9f7c39d3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/677658c166824a6e94e516ca4926a79e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7e20e3637ca54ab180c85e4d759a5e3f] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.2 K 2024-12-12T05:40:36,591 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1050a2343c7b47d6acfa51ad9f7c39d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733982031756 2024-12-12T05:40:36,591 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 677658c166824a6e94e516ca4926a79e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733982031771 2024-12-12T05:40:36,591 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e20e3637ca54ab180c85e4d759a5e3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982032389 2024-12-12T05:40:36,593 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121267f437459fb04f37a4e848aa034c46af_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121267f437459fb04f37a4e848aa034c46af_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:36,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/9fcc26504ca549358c4bf878e358e99a, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:36,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/9fcc26504ca549358c4bf878e358e99a is 175, key is test_row_0/A:col10/1733982034538/Put/seqid=0 2024-12-12T05:40:36,598 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#A#compaction#295 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:36,598 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/e3f54cb8145a46b5831b6c662ce2af7b is 175, key is test_row_0/A:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:36,612 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#C#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:36,612 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/197e0becccf5483ab4ab699278d302d5 is 50, key is test_row_0/C:col10/1733982032389/Put/seqid=0 2024-12-12T05:40:36,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742177_1353 (size=31058) 2024-12-12T05:40:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742176_1352 (size=30955) 2024-12-12T05:40:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742178_1354 (size=12104) 2024-12-12T05:40:36,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:36,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:36,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982096676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982096677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982096676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982096678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982096680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982096783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982096783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982096783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982096783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982096787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T05:40:36,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982096988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982096988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982096989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982096989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:36,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982096990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,016 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/9fcc26504ca549358c4bf878e358e99a 2024-12-12T05:40:37,019 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/e3f54cb8145a46b5831b6c662ce2af7b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e3f54cb8145a46b5831b6c662ce2af7b 2024-12-12T05:40:37,023 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/A of 28540e98a53f5d1213a72e3944e7527f into e3f54cb8145a46b5831b6c662ce2af7b(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:37,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:37,023 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/A, priority=13, startTime=1733982036158; duration=0sec 2024-12-12T05:40:37,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:37,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:A 2024-12-12T05:40:37,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/0a766f3e1c6d415d980e37d7451d97b4 is 50, key is test_row_0/B:col10/1733982034538/Put/seqid=0 2024-12-12T05:40:37,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742179_1355 (size=12001) 2024-12-12T05:40:37,032 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/0a766f3e1c6d415d980e37d7451d97b4 2024-12-12T05:40:37,033 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/197e0becccf5483ab4ab699278d302d5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/197e0becccf5483ab4ab699278d302d5 2024-12-12T05:40:37,036 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/C of 28540e98a53f5d1213a72e3944e7527f into 197e0becccf5483ab4ab699278d302d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:37,036 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:37,036 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/C, priority=13, startTime=1733982036159; duration=0sec 2024-12-12T05:40:37,036 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:37,036 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:C 2024-12-12T05:40:37,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/9a0e61227c3f4a9a82748bdf469e7cf2 is 50, key is test_row_0/C:col10/1733982034538/Put/seqid=0 2024-12-12T05:40:37,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742180_1356 (size=12001) 2024-12-12T05:40:37,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982097292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982097293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982097294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982097294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982097295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,442 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/9a0e61227c3f4a9a82748bdf469e7cf2 2024-12-12T05:40:37,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/9fcc26504ca549358c4bf878e358e99a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a 2024-12-12T05:40:37,448 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a, entries=150, sequenceid=77, filesize=30.2 K 2024-12-12T05:40:37,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/0a766f3e1c6d415d980e37d7451d97b4 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/0a766f3e1c6d415d980e37d7451d97b4 2024-12-12T05:40:37,451 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/0a766f3e1c6d415d980e37d7451d97b4, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T05:40:37,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/9a0e61227c3f4a9a82748bdf469e7cf2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/9a0e61227c3f4a9a82748bdf469e7cf2 2024-12-12T05:40:37,455 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/9a0e61227c3f4a9a82748bdf469e7cf2, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T05:40:37,455 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 28540e98a53f5d1213a72e3944e7527f in 1293ms, sequenceid=77, compaction requested=false 2024-12-12T05:40:37,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:37,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:37,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-12T05:40:37,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-12T05:40:37,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-12T05:40:37,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5980 sec 2024-12-12T05:40:37,458 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.6020 sec 2024-12-12T05:40:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:37,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T05:40:37,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:37,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:37,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:37,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123000542e5e6347c9bd8c4b062ff0316c_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742181_1357 (size=14594) 2024-12-12T05:40:37,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982097816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982097816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982097817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982097821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982097821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982097922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982097922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982097923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982097931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982097930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:37,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T05:40:37,960 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-12T05:40:37,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-12T05:40:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:37,962 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:37,963 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:37,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:38,114 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:38,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:38,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982098127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982098127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982098128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982098135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982098136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,213 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:38,215 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123000542e5e6347c9bd8c4b062ff0316c_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123000542e5e6347c9bd8c4b062ff0316c_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:38,216 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/4ce3ff16c2724a289b60554c5ed9f8d2, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:38,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/4ce3ff16c2724a289b60554c5ed9f8d2 is 175, key is test_row_0/A:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:38,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742182_1358 (size=39549) 2024-12-12T05:40:38,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T05:40:38,219 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T05:40:38,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:38,266 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:38,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:38,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:38,418 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:38,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:38,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982098431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982098431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982098432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982098440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982098441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:38,571 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,620 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/4ce3ff16c2724a289b60554c5ed9f8d2 2024-12-12T05:40:38,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/e2a9b61a35954e7fb0988e41952407bb is 50, key is test_row_0/B:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:38,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742183_1359 (size=12001) 2024-12-12T05:40:38,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/e2a9b61a35954e7fb0988e41952407bb 2024-12-12T05:40:38,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/66ce99667eed425287e27636e9e77464 is 50, key is test_row_0/C:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:38,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742184_1360 (size=12001) 2024-12-12T05:40:38,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 
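The RegionTooBusyException entries above are the region server refusing new writes while the memstore of region 28540e98a53f5d1213a72e3944e7527f sits above its blocking limit (512.0 K in this run). In stock HBase 2.x that limit is the per-region flush size (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier, so the small value here suggests the test deliberately shrinks the flush size to force frequent flushes. Below is a minimal client-side sketch of writing a row and backing off on this exception, for illustration only: the table, row, family and qualifier names are taken from the log, while the cell value, retry count and sleep times are assumptions, and in practice the HBase client retries this exception internally before it ever reaches application code.

    // Sketch: put a cell into the test table and back off if the region reports
    // that its memstore is over the blocking limit. Assumes an hbase-site.xml on
    // the classpath; the value and backoff numbers are arbitrary.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);               // accepted once the memstore drops below the limit
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(200L * attempt); // wait for the in-flight flush to free memstore space
            }
          }
        }
      }
    }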
2024-12-12T05:40:38,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:38,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:38,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
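The pid=107 exchange above is a master-driven flush procedure being retried: each time the master dispatches FlushRegionCallable, the region server answers "NOT flushing ... as already flushing" because the MemStoreFlusher flush of the same region is still running (it finishes at 05:40:39,059 further down), the callable converts that into the "Unable to complete flush" IOException, and the master schedules another attempt. From a client's point of view the whole loop sits behind a single Admin flush request; a minimal sketch is shown below, assuming the standard HBase 2.x client API, with only the table name taken from the log.

    // Sketch: request a flush of the test table. Server-side this becomes a
    // per-region flush procedure like pid=107, which is re-dispatched while the
    // region is already busy flushing.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees")); // triggers a flush of every region of the table
        }
      }
    }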
2024-12-12T05:40:38,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,875 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:38,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
as already flushing 2024-12-12T05:40:38,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:38,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982098936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982098938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982098939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982098944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:38,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:38,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982098944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,028 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:39,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:39,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:39,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:39,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:39,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:39,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:39,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/66ce99667eed425287e27636e9e77464 2024-12-12T05:40:39,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/4ce3ff16c2724a289b60554c5ed9f8d2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2 2024-12-12T05:40:39,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2, entries=200, sequenceid=95, filesize=38.6 K 2024-12-12T05:40:39,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/e2a9b61a35954e7fb0988e41952407bb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/e2a9b61a35954e7fb0988e41952407bb 2024-12-12T05:40:39,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/e2a9b61a35954e7fb0988e41952407bb, entries=150, sequenceid=95, 
filesize=11.7 K 2024-12-12T05:40:39,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/66ce99667eed425287e27636e9e77464 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/66ce99667eed425287e27636e9e77464 2024-12-12T05:40:39,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/66ce99667eed425287e27636e9e77464, entries=150, sequenceid=95, filesize=11.7 K 2024-12-12T05:40:39,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 28540e98a53f5d1213a72e3944e7527f in 1260ms, sequenceid=95, compaction requested=true 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:39,059 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:39,059 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:39,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:39,060 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:39,060 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:39,060 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 
28540e98a53f5d1213a72e3944e7527f/A is initiating minor compaction (all files) 2024-12-12T05:40:39,060 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/B is initiating minor compaction (all files) 2024-12-12T05:40:39,060 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/B in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:39,060 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/A in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:39,060 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/214a56f00b334dd7bdf3829ced0193fa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/0a766f3e1c6d415d980e37d7451d97b4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/e2a9b61a35954e7fb0988e41952407bb] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.3 K 2024-12-12T05:40:39,060 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e3f54cb8145a46b5831b6c662ce2af7b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=99.2 K 2024-12-12T05:40:39,060 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:39,060 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e3f54cb8145a46b5831b6c662ce2af7b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2] 2024-12-12T05:40:39,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 214a56f00b334dd7bdf3829ced0193fa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982032389 2024-12-12T05:40:39,061 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3f54cb8145a46b5831b6c662ce2af7b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982032389 2024-12-12T05:40:39,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a766f3e1c6d415d980e37d7451d97b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733982034528 2024-12-12T05:40:39,061 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fcc26504ca549358c4bf878e358e99a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733982034528 2024-12-12T05:40:39,061 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e2a9b61a35954e7fb0988e41952407bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733982036674 2024-12-12T05:40:39,061 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ce3ff16c2724a289b60554c5ed9f8d2, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733982036674 2024-12-12T05:40:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:39,066 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:39,067 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#B#compaction#304 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:39,068 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212d63e9b64940a40faaad279c34b9f7dce_28540e98a53f5d1213a72e3944e7527f store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:39,068 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/14d277a65f5b4a5cba952ec0597c3c32 is 50, key is test_row_0/B:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:39,069 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212d63e9b64940a40faaad279c34b9f7dce_28540e98a53f5d1213a72e3944e7527f, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:39,069 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d63e9b64940a40faaad279c34b9f7dce_28540e98a53f5d1213a72e3944e7527f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:39,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742185_1361 (size=12207) 2024-12-12T05:40:39,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742186_1362 (size=4469) 2024-12-12T05:40:39,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
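The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above reflect the ratio test used when picking store files for a minor compaction: a candidate set is kept only if every file is no larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files, subject to the hbase.hstore.compaction.min/max bounds. The snippet below is a deliberately simplified illustration of that check, not the actual ExploringCompactionPolicy code, and the file sizes are rounded approximations of the three A-store files mentioned in the log.

    // Simplified ratio check: each file must be <= ratio * (sum of the others),
    // otherwise the candidate set is rejected. Illustration only.
    import java.util.List;

    public class RatioCheck {
      static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > ratio * (total - size)) {
            return false;                 // one file dominates the candidate set
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the A-store selection from the log: ~30.3 K, ~30.2 K, ~38.6 K.
        System.out.println(inRatio(List.of(31_000L, 31_000L, 39_500L), 1.2)); // prints true
      }
    }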
2024-12-12T05:40:39,181 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:39,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121227b05fe861ff4356b92723f9ab336246_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982037820/Put/seqid=0 2024-12-12T05:40:39,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742187_1363 (size=12154) 2024-12-12T05:40:39,483 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/14d277a65f5b4a5cba952ec0597c3c32 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/14d277a65f5b4a5cba952ec0597c3c32 2024-12-12T05:40:39,486 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/B of 28540e98a53f5d1213a72e3944e7527f into 14d277a65f5b4a5cba952ec0597c3c32(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
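The "Committing ... .tmp/... as ..." lines show the write-to-temp-then-rename pattern HBase uses for both flush and compaction output: new HFiles are written under the region's .tmp directory and only moved into the column-family directory once complete, so readers never observe a partially written file. The sketch below illustrates the pattern with the plain Hadoop FileSystem API; the paths are shortened placeholders and the whole thing is a simplification of what HRegionFileSystem actually does.

    // Illustration of .tmp-then-rename: write the file somewhere invisible to
    // readers, then publish it with a single rename. Placeholder paths, not the
    // real HBase layout code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // picks up core-site.xml / hdfs-site.xml
        Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/B/newfile");
        Path dst = new Path("/data/default/TestAcidGuarantees/region/B/newfile");
        try (FSDataOutputStream out = fs.create(tmp)) {
          out.write(new byte[0]);                            // real code would stream HFile bytes here
        }
        if (!fs.rename(tmp, dst)) {                          // the rename is the "commit" that makes the file visible
          throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
        }
      }
    }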
2024-12-12T05:40:39,486 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:39,486 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/B, priority=13, startTime=1733982039059; duration=0sec 2024-12-12T05:40:39,486 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:39,486 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:B 2024-12-12T05:40:39,486 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:39,487 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:39,487 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/C is initiating minor compaction (all files) 2024-12-12T05:40:39,487 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/C in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:39,487 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/197e0becccf5483ab4ab699278d302d5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/9a0e61227c3f4a9a82748bdf469e7cf2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/66ce99667eed425287e27636e9e77464] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.3 K 2024-12-12T05:40:39,488 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 197e0becccf5483ab4ab699278d302d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982032389 2024-12-12T05:40:39,488 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a0e61227c3f4a9a82748bdf469e7cf2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733982034528 2024-12-12T05:40:39,488 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 66ce99667eed425287e27636e9e77464, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733982036674 2024-12-12T05:40:39,488 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
28540e98a53f5d1213a72e3944e7527f#A#compaction#303 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:39,489 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/d1e6ac131a084b7d9a30b50250c7bbe4 is 175, key is test_row_0/A:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:39,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742188_1364 (size=31161) 2024-12-12T05:40:39,494 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#C#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:39,495 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/e241ccd1b5464c26bf8831d1e5b1a4e2 is 50, key is test_row_0/C:col10/1733982036674/Put/seqid=0 2024-12-12T05:40:39,497 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/d1e6ac131a084b7d9a30b50250c7bbe4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d1e6ac131a084b7d9a30b50250c7bbe4 2024-12-12T05:40:39,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742189_1365 (size=12207) 2024-12-12T05:40:39,501 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/A of 28540e98a53f5d1213a72e3944e7527f into d1e6ac131a084b7d9a30b50250c7bbe4(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
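The PressureAwareThroughputController lines above ("average throughput is ... total limit is 50.00 MB/second") reflect compaction throughput throttling on the region server. As a rough sketch under stated assumptions, these are the configuration keys that tune it; the 50 MB/s and 100 MB/s bounds below are illustrative values matching the limit printed in the log, not values taken from the test.

// Minimal sketch with assumed bounds; not the test's actual configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Throughput controller implementation used by the region server.
    conf.set("hbase.regionserver.throughput.controller",
        "org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController");
    // Bounds between which the controller scales the compaction write limit
    // based on memstore pressure (assumed 50 MB/s and 100 MB/s).
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
  }
}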
2024-12-12T05:40:39,501 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:39,501 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/A, priority=13, startTime=1733982039059; duration=0sec 2024-12-12T05:40:39,501 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:39,501 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:A 2024-12-12T05:40:39,504 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/e241ccd1b5464c26bf8831d1e5b1a4e2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/e241ccd1b5464c26bf8831d1e5b1a4e2 2024-12-12T05:40:39,507 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/C of 28540e98a53f5d1213a72e3944e7527f into e241ccd1b5464c26bf8831d1e5b1a4e2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:39,507 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:39,507 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/C, priority=13, startTime=1733982039059; duration=0sec 2024-12-12T05:40:39,507 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:39,507 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:C 2024-12-12T05:40:39,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:39,595 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121227b05fe861ff4356b92723f9ab336246_28540e98a53f5d1213a72e3944e7527f to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121227b05fe861ff4356b92723f9ab336246_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:39,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/1b0d8a0fe65c4c4480a3b491339780a7, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:39,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/1b0d8a0fe65c4c4480a3b491339780a7 is 175, key is test_row_0/A:col10/1733982037820/Put/seqid=0 2024-12-12T05:40:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742190_1366 (size=30955) 2024-12-12T05:40:39,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:39,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:39,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982099954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982099955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982099957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982099958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:39,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982099959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,003 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/1b0d8a0fe65c4c4480a3b491339780a7 2024-12-12T05:40:40,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/53f107ab2ad045da85b27ba9f3df217c is 50, key is test_row_0/B:col10/1733982037820/Put/seqid=0 2024-12-12T05:40:40,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742191_1367 (size=12001) 2024-12-12T05:40:40,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982100059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982100061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982100062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:40,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982100063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982100263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982100263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982100266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982100268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,423 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/53f107ab2ad045da85b27ba9f3df217c 2024-12-12T05:40:40,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/2e16b26fa61a43a997d100014d6d8dd9 is 50, key is test_row_0/C:col10/1733982037820/Put/seqid=0 2024-12-12T05:40:40,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742192_1368 (size=12001) 2024-12-12T05:40:40,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982100565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982100567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982100569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:40,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982100573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:40,833 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/2e16b26fa61a43a997d100014d6d8dd9 2024-12-12T05:40:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/1b0d8a0fe65c4c4480a3b491339780a7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7 2024-12-12T05:40:40,839 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7, entries=150, sequenceid=116, filesize=30.2 K 2024-12-12T05:40:40,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/53f107ab2ad045da85b27ba9f3df217c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/53f107ab2ad045da85b27ba9f3df217c 2024-12-12T05:40:40,843 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/53f107ab2ad045da85b27ba9f3df217c, entries=150, sequenceid=116, filesize=11.7 K 2024-12-12T05:40:40,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/2e16b26fa61a43a997d100014d6d8dd9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/2e16b26fa61a43a997d100014d6d8dd9 2024-12-12T05:40:40,847 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/2e16b26fa61a43a997d100014d6d8dd9, entries=150, sequenceid=116, filesize=11.7 K 2024-12-12T05:40:40,848 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 28540e98a53f5d1213a72e3944e7527f in 1667ms, sequenceid=116, compaction requested=false 2024-12-12T05:40:40,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:40,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:40,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-12T05:40:40,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-12T05:40:40,850 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-12T05:40:40,850 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8860 sec 2024-12-12T05:40:40,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.8890 sec 2024-12-12T05:40:41,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:41,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T05:40:41,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:41,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:41,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:41,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:41,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:41,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:41,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b20051aa4603454898b8ee38855a5fa9_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:41,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742193_1369 (size=17284) 2024-12-12T05:40:41,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982101114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982101117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982101118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982101118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982101223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982101223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982101230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982101230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982101429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982101429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982101432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982101435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,485 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:41,488 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b20051aa4603454898b8ee38855a5fa9_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b20051aa4603454898b8ee38855a5fa9_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:41,489 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/d0133791ff3443fa97f3ed8357c58c01, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:41,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/d0133791ff3443fa97f3ed8357c58c01 is 175, key is test_row_0/A:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:41,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742194_1370 (size=48389) 2024-12-12T05:40:41,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982101733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982101734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982101736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982101740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,922 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/d0133791ff3443fa97f3ed8357c58c01 2024-12-12T05:40:41,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/1edcd1921d434d1f80927f01ee76b82d is 50, key is test_row_0/B:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:41,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742195_1371 (size=12151) 2024-12-12T05:40:41,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:41,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982101964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:41,967 DEBUG [Thread-1533 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T05:40:42,066 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-12T05:40:42,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-12T05:40:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T05:40:42,068 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:42,069 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:42,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:42,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T05:40:42,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T05:40:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:42,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:42,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982102238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:42,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982102238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:42,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982102240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:42,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982102248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/1edcd1921d434d1f80927f01ee76b82d 2024-12-12T05:40:42,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/61670084a1214e2aa67d10ccdf185c71 is 50, key is test_row_0/C:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:42,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742196_1372 (size=12151) 2024-12-12T05:40:42,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T05:40:42,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T05:40:42,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
as already flushing 2024-12-12T05:40:42,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,524 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T05:40:42,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:42,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T05:40:42,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T05:40:42,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:42,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:42,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/61670084a1214e2aa67d10ccdf185c71 2024-12-12T05:40:42,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/d0133791ff3443fa97f3ed8357c58c01 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01 2024-12-12T05:40:42,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01, entries=250, sequenceid=137, filesize=47.3 K 2024-12-12T05:40:42,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/1edcd1921d434d1f80927f01ee76b82d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1edcd1921d434d1f80927f01ee76b82d 2024-12-12T05:40:42,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1edcd1921d434d1f80927f01ee76b82d, entries=150, 
sequenceid=137, filesize=11.9 K 2024-12-12T05:40:42,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/61670084a1214e2aa67d10ccdf185c71 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/61670084a1214e2aa67d10ccdf185c71 2024-12-12T05:40:42,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/61670084a1214e2aa67d10ccdf185c71, entries=150, sequenceid=137, filesize=11.9 K 2024-12-12T05:40:42,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 28540e98a53f5d1213a72e3944e7527f in 1679ms, sequenceid=137, compaction requested=true 2024-12-12T05:40:42,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:42,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:42,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:42,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:42,754 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:42,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:42,754 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:42,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:42,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:42,755 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110505 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:42,755 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:42,755 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] 
regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/B is initiating minor compaction (all files) 2024-12-12T05:40:42,755 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/A is initiating minor compaction (all files) 2024-12-12T05:40:42,755 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/A in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,755 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/B in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,755 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d1e6ac131a084b7d9a30b50250c7bbe4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=107.9 K 2024-12-12T05:40:42,755 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/14d277a65f5b4a5cba952ec0597c3c32, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/53f107ab2ad045da85b27ba9f3df217c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1edcd1921d434d1f80927f01ee76b82d] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.5 K 2024-12-12T05:40:42,755 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:42,756 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d1e6ac131a084b7d9a30b50250c7bbe4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01] 2024-12-12T05:40:42,756 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 14d277a65f5b4a5cba952ec0597c3c32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733982036674 2024-12-12T05:40:42,756 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1e6ac131a084b7d9a30b50250c7bbe4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733982036674 2024-12-12T05:40:42,756 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 53f107ab2ad045da85b27ba9f3df217c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733982037815 2024-12-12T05:40:42,756 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b0d8a0fe65c4c4480a3b491339780a7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733982037815 2024-12-12T05:40:42,757 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1edcd1921d434d1f80927f01ee76b82d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733982039958 2024-12-12T05:40:42,757 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0133791ff3443fa97f3ed8357c58c01, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733982039953 2024-12-12T05:40:42,767 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:42,773 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#B#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:42,773 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/46cb365f7760463783d8df1098e44019 is 50, key is test_row_0/B:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:42,775 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212f508c0af0da04bb089808e3714e3633a_28540e98a53f5d1213a72e3944e7527f store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:42,777 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212f508c0af0da04bb089808e3714e3633a_28540e98a53f5d1213a72e3944e7527f, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:42,777 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f508c0af0da04bb089808e3714e3633a_28540e98a53f5d1213a72e3944e7527f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:42,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742197_1373 (size=12459) 2024-12-12T05:40:42,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742198_1374 (size=4469) 2024-12-12T05:40:42,808 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#A#compaction#312 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:42,808 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ab25394924ea4bb7a554091a31cf0467 is 175, key is test_row_0/A:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:42,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742199_1375 (size=31413) 2024-12-12T05:40:42,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:42,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T05:40:42,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:42,830 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T05:40:42,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:42,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:42,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:42,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:42,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:42,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:42,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f931b9178e6649f2867b63a4bc88617b_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982041116/Put/seqid=0 2024-12-12T05:40:42,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742200_1376 (size=12304) 2024-12-12T05:40:42,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:42,866 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f931b9178e6649f2867b63a4bc88617b_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f931b9178e6649f2867b63a4bc88617b_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:42,867 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac is 175, key is test_row_0/A:col10/1733982041116/Put/seqid=0 2024-12-12T05:40:42,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742201_1377 (size=31105) 2024-12-12T05:40:42,877 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac 2024-12-12T05:40:42,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/9822f646fc3141d997a4b10d59b31e4a is 50, key is test_row_0/B:col10/1733982041116/Put/seqid=0 2024-12-12T05:40:42,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742202_1378 (size=12151) 2024-12-12T05:40:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T05:40:43,192 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/46cb365f7760463783d8df1098e44019 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/46cb365f7760463783d8df1098e44019 2024-12-12T05:40:43,195 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/B of 28540e98a53f5d1213a72e3944e7527f into 46cb365f7760463783d8df1098e44019(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:43,196 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:43,196 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/B, priority=13, startTime=1733982042754; duration=0sec 2024-12-12T05:40:43,196 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:43,196 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:B 2024-12-12T05:40:43,196 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:43,196 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:43,196 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/C is initiating minor compaction (all files) 2024-12-12T05:40:43,197 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/C in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:43,197 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/e241ccd1b5464c26bf8831d1e5b1a4e2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/2e16b26fa61a43a997d100014d6d8dd9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/61670084a1214e2aa67d10ccdf185c71] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.5 K 2024-12-12T05:40:43,197 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e241ccd1b5464c26bf8831d1e5b1a4e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733982036674 2024-12-12T05:40:43,197 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e16b26fa61a43a997d100014d6d8dd9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733982037815 2024-12-12T05:40:43,197 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 61670084a1214e2aa67d10ccdf185c71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733982039958 2024-12-12T05:40:43,203 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
28540e98a53f5d1213a72e3944e7527f#C#compaction#316 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:43,203 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/09f71ba085204133a088c1355a81a20a is 50, key is test_row_0/C:col10/1733982041076/Put/seqid=0 2024-12-12T05:40:43,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742203_1379 (size=12459) 2024-12-12T05:40:43,210 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/09f71ba085204133a088c1355a81a20a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/09f71ba085204133a088c1355a81a20a 2024-12-12T05:40:43,213 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/C of 28540e98a53f5d1213a72e3944e7527f into 09f71ba085204133a088c1355a81a20a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:43,214 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:43,214 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/C, priority=13, startTime=1733982042754; duration=0sec 2024-12-12T05:40:43,214 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:43,214 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:C 2024-12-12T05:40:43,229 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ab25394924ea4bb7a554091a31cf0467 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ab25394924ea4bb7a554091a31cf0467 2024-12-12T05:40:43,233 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/A of 28540e98a53f5d1213a72e3944e7527f into ab25394924ea4bb7a554091a31cf0467(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:43,233 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:43,233 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/A, priority=13, startTime=1733982042754; duration=0sec 2024-12-12T05:40:43,233 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:43,233 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:A 2024-12-12T05:40:43,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:43,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:43,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982103262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982103263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982103263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982103264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,290 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/9822f646fc3141d997a4b10d59b31e4a 2024-12-12T05:40:43,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/7f357cebaa1f48c592d7cc897c0894df is 50, key is test_row_0/C:col10/1733982041116/Put/seqid=0 2024-12-12T05:40:43,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742204_1380 (size=12151) 2024-12-12T05:40:43,299 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/7f357cebaa1f48c592d7cc897c0894df 2024-12-12T05:40:43,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac 2024-12-12T05:40:43,307 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac, entries=150, sequenceid=155, filesize=30.4 K 2024-12-12T05:40:43,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/9822f646fc3141d997a4b10d59b31e4a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/9822f646fc3141d997a4b10d59b31e4a 2024-12-12T05:40:43,311 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/9822f646fc3141d997a4b10d59b31e4a, entries=150, sequenceid=155, filesize=11.9 K 2024-12-12T05:40:43,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/7f357cebaa1f48c592d7cc897c0894df as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7f357cebaa1f48c592d7cc897c0894df 2024-12-12T05:40:43,317 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7f357cebaa1f48c592d7cc897c0894df, entries=150, sequenceid=155, filesize=11.9 K 2024-12-12T05:40:43,318 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 28540e98a53f5d1213a72e3944e7527f in 488ms, sequenceid=155, compaction requested=false 2024-12-12T05:40:43,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:43,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:43,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-12T05:40:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-12T05:40:43,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-12T05:40:43,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2500 sec 2024-12-12T05:40:43,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.2530 sec 2024-12-12T05:40:43,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:43,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T05:40:43,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:43,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:43,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:43,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:43,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:43,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:43,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a3d4791609dc4b50abd057911dee43e9_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742205_1381 (size=12304) 2024-12-12T05:40:43,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982103387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982103388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982103388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982103389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982103490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982103492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982103493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982103493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982103692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982103695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982103695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982103696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:43,780 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:43,783 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a3d4791609dc4b50abd057911dee43e9_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a3d4791609dc4b50abd057911dee43e9_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:43,784 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/5658c2d97fff4c6fbfdd292de029bed3, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:43,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/5658c2d97fff4c6fbfdd292de029bed3 is 175, key is test_row_0/A:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:43,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742206_1382 (size=31105) 2024-12-12T05:40:43,789 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=179, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/5658c2d97fff4c6fbfdd292de029bed3 2024-12-12T05:40:43,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/1267ed261f52422cbef251f44c46d307 is 50, key is test_row_0/B:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:43,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742207_1383 
(size=12151) 2024-12-12T05:40:43,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/1267ed261f52422cbef251f44c46d307 2024-12-12T05:40:43,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/b9a26a98a5404aedae9567f279e2175c is 50, key is test_row_0/C:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:43,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742208_1384 (size=12151) 2024-12-12T05:40:43,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:43,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982103996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982104000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982104000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982104000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T05:40:44,171 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-12T05:40:44,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-12T05:40:44,173 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:44,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T05:40:44,174 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:44,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:44,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/b9a26a98a5404aedae9567f279e2175c 2024-12-12T05:40:44,219 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/5658c2d97fff4c6fbfdd292de029bed3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3 2024-12-12T05:40:44,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3, entries=150, sequenceid=179, filesize=30.4 K 2024-12-12T05:40:44,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/1267ed261f52422cbef251f44c46d307 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1267ed261f52422cbef251f44c46d307 2024-12-12T05:40:44,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1267ed261f52422cbef251f44c46d307, entries=150, sequenceid=179, filesize=11.9 K 2024-12-12T05:40:44,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/b9a26a98a5404aedae9567f279e2175c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/b9a26a98a5404aedae9567f279e2175c 2024-12-12T05:40:44,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/b9a26a98a5404aedae9567f279e2175c, entries=150, sequenceid=179, filesize=11.9 K 2024-12-12T05:40:44,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 28540e98a53f5d1213a72e3944e7527f in 857ms, sequenceid=179, compaction requested=true 2024-12-12T05:40:44,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:44,229 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
28540e98a53f5d1213a72e3944e7527f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:44,229 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:44,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:44,229 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:44,229 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:44,229 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/B is initiating minor compaction (all files) 2024-12-12T05:40:44,229 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/A is initiating minor compaction (all files) 2024-12-12T05:40:44,230 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/A in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:44,230 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/B in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:44,230 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/46cb365f7760463783d8df1098e44019, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/9822f646fc3141d997a4b10d59b31e4a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1267ed261f52422cbef251f44c46d307] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.9 K 2024-12-12T05:40:44,230 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ab25394924ea4bb7a554091a31cf0467, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=91.4 K 2024-12-12T05:40:44,230 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:44,230 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ab25394924ea4bb7a554091a31cf0467, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3] 2024-12-12T05:40:44,231 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 46cb365f7760463783d8df1098e44019, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733982039958 2024-12-12T05:40:44,231 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab25394924ea4bb7a554091a31cf0467, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733982039958 2024-12-12T05:40:44,231 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba9d4dc4ef8a458ea13cc327cd1bd7ac, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733982041111 2024-12-12T05:40:44,231 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 9822f646fc3141d997a4b10d59b31e4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733982041111 2024-12-12T05:40:44,232 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5658c2d97fff4c6fbfdd292de029bed3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733982043262 2024-12-12T05:40:44,232 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1267ed261f52422cbef251f44c46d307, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733982043262 2024-12-12T05:40:44,237 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:44,237 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#B#compaction#321 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:44,238 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/f7f21cb8fe5e4d3ea270f5fc62ba240d is 50, key is test_row_0/B:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:44,239 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212971b1facdb8841a392f5183a43a6722e_28540e98a53f5d1213a72e3944e7527f store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:44,240 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212971b1facdb8841a392f5183a43a6722e_28540e98a53f5d1213a72e3944e7527f, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:44,240 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212971b1facdb8841a392f5183a43a6722e_28540e98a53f5d1213a72e3944e7527f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742209_1385 (size=12561) 2024-12-12T05:40:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742210_1386 (size=4469) 2024-12-12T05:40:44,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T05:40:44,325 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T05:40:44,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:44,325 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T05:40:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:44,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127eea6b74db114c70b5ff7d90500050bc_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982043387/Put/seqid=0 2024-12-12T05:40:44,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742211_1387 (size=12304) 2024-12-12T05:40:44,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T05:40:44,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:44,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:44,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982104528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982104529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982104533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982104533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982104634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982104634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982104638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982104638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,655 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#A#compaction#322 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:44,656 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/1349880594db41a3bf9791631a40119a is 175, key is test_row_0/A:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:44,660 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/f7f21cb8fe5e4d3ea270f5fc62ba240d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/f7f21cb8fe5e4d3ea270f5fc62ba240d 2024-12-12T05:40:44,663 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/B of 28540e98a53f5d1213a72e3944e7527f into f7f21cb8fe5e4d3ea270f5fc62ba240d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:44,664 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:44,664 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/B, priority=13, startTime=1733982044229; duration=0sec 2024-12-12T05:40:44,664 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:44,664 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:B 2024-12-12T05:40:44,664 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:44,665 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:44,665 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/C is initiating minor compaction (all files) 2024-12-12T05:40:44,665 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/C in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:44,665 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/09f71ba085204133a088c1355a81a20a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7f357cebaa1f48c592d7cc897c0894df, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/b9a26a98a5404aedae9567f279e2175c] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=35.9 K 2024-12-12T05:40:44,666 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 09f71ba085204133a088c1355a81a20a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733982039958 2024-12-12T05:40:44,666 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f357cebaa1f48c592d7cc897c0894df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733982041111 2024-12-12T05:40:44,666 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b9a26a98a5404aedae9567f279e2175c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733982043262 2024-12-12T05:40:44,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 
is added to blk_1073742212_1388 (size=31515) 2024-12-12T05:40:44,676 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#C#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:44,676 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/d72c3a1e3f1c49039b837ceb16c86849 is 50, key is test_row_0/C:col10/1733982043370/Put/seqid=0 2024-12-12T05:40:44,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742213_1389 (size=12561) 2024-12-12T05:40:44,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:44,739 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127eea6b74db114c70b5ff7d90500050bc_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127eea6b74db114c70b5ff7d90500050bc_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:44,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/7d89c915660d47169f4406178365d7ac, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:44,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/7d89c915660d47169f4406178365d7ac is 175, key is test_row_0/A:col10/1733982043387/Put/seqid=0 2024-12-12T05:40:44,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742214_1390 (size=31105) 2024-12-12T05:40:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T05:40:44,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982104838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982104839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982104843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:44,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:44,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982104843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,072 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/1349880594db41a3bf9791631a40119a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1349880594db41a3bf9791631a40119a 2024-12-12T05:40:45,075 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/A of 28540e98a53f5d1213a72e3944e7527f into 1349880594db41a3bf9791631a40119a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:45,075 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:45,075 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/A, priority=13, startTime=1733982044228; duration=0sec 2024-12-12T05:40:45,075 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:45,076 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:A 2024-12-12T05:40:45,082 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/d72c3a1e3f1c49039b837ceb16c86849 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/d72c3a1e3f1c49039b837ceb16c86849 2024-12-12T05:40:45,085 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/C of 28540e98a53f5d1213a72e3944e7527f into d72c3a1e3f1c49039b837ceb16c86849(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:45,086 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:45,086 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/C, priority=13, startTime=1733982044229; duration=0sec 2024-12-12T05:40:45,086 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:45,086 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:C 2024-12-12T05:40:45,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982105142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982105143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982105146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982105147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,151 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/7d89c915660d47169f4406178365d7ac 2024-12-12T05:40:45,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/3d76198a0c744c44b24bb32925625020 is 50, key is test_row_0/B:col10/1733982043387/Put/seqid=0 2024-12-12T05:40:45,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742215_1391 (size=12151) 2024-12-12T05:40:45,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T05:40:45,560 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/3d76198a0c744c44b24bb32925625020 2024-12-12T05:40:45,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/95ad46cc01fb4c89bb2e50cf13fde8c4 is 50, key is test_row_0/C:col10/1733982043387/Put/seqid=0 2024-12-12T05:40:45,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742216_1392 (size=12151) 2024-12-12T05:40:45,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982105649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982105649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982105652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982105653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:45,968 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/95ad46cc01fb4c89bb2e50cf13fde8c4 2024-12-12T05:40:45,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/7d89c915660d47169f4406178365d7ac as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac 2024-12-12T05:40:45,975 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac, entries=150, sequenceid=195, filesize=30.4 K 2024-12-12T05:40:45,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/3d76198a0c744c44b24bb32925625020 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/3d76198a0c744c44b24bb32925625020 2024-12-12T05:40:45,978 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/3d76198a0c744c44b24bb32925625020, entries=150, sequenceid=195, filesize=11.9 K 2024-12-12T05:40:45,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/95ad46cc01fb4c89bb2e50cf13fde8c4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/95ad46cc01fb4c89bb2e50cf13fde8c4 2024-12-12T05:40:45,981 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/95ad46cc01fb4c89bb2e50cf13fde8c4, entries=150, sequenceid=195, filesize=11.9 K 2024-12-12T05:40:45,982 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 28540e98a53f5d1213a72e3944e7527f in 1657ms, sequenceid=195, compaction requested=false 2024-12-12T05:40:45,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:45,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:45,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-12T05:40:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-12T05:40:45,984 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-12T05:40:45,984 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8090 sec 2024-12-12T05:40:45,985 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.8120 sec 2024-12-12T05:40:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:46,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T05:40:46,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:46,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:46,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:46,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:46,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:46,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:46,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120fcc4432b2594908bf7837652d8bbc7f_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:46,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742217_1393 (size=14794) 2024-12-12T05:40:46,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982106044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982106149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T05:40:46,277 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-12T05:40:46,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-12T05:40:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:46,279 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:46,279 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:46,280 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:46,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982106353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:46,412 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:46,414 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120fcc4432b2594908bf7837652d8bbc7f_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120fcc4432b2594908bf7837652d8bbc7f_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:46,415 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/af29aab345e947b8923a5aa60d201f62, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:46,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/af29aab345e947b8923a5aa60d201f62 is 175, key is test_row_0/A:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:46,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742218_1394 (size=39749) 2024-12-12T05:40:46,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:46,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:46,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:46,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:46,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:46,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:46,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982106655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982106655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982106656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982106662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982106663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,735 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:46,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:46,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,819 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=220, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/af29aab345e947b8923a5aa60d201f62 2024-12-12T05:40:46,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/ba12558c5cf74007b9b50d65f911c7e8 is 50, key is test_row_0/B:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:46,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742219_1395 (size=12151) 2024-12-12T05:40:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:46,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:46,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:46,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
as already flushing 2024-12-12T05:40:46,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:46,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:46,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T05:40:47,040 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:47,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:47,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:47,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:47,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982107160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:47,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:47,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:47,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/ba12558c5cf74007b9b50d65f911c7e8 2024-12-12T05:40:47,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/fcf227109a164fac8734ba9daaa8ea1e is 50, key is test_row_0/C:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:47,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742220_1396 (size=12151) 2024-12-12T05:40:47,344 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:47,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:47,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:47,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:47,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/fcf227109a164fac8734ba9daaa8ea1e 2024-12-12T05:40:47,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/af29aab345e947b8923a5aa60d201f62 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62 2024-12-12T05:40:47,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62, entries=200, sequenceid=220, filesize=38.8 K 2024-12-12T05:40:47,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/ba12558c5cf74007b9b50d65f911c7e8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/ba12558c5cf74007b9b50d65f911c7e8 2024-12-12T05:40:47,648 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:47,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:47,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:47,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/ba12558c5cf74007b9b50d65f911c7e8, entries=150, sequenceid=220, filesize=11.9 K 2024-12-12T05:40:47,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/fcf227109a164fac8734ba9daaa8ea1e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/fcf227109a164fac8734ba9daaa8ea1e 2024-12-12T05:40:47,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/fcf227109a164fac8734ba9daaa8ea1e, entries=150, sequenceid=220, filesize=11.9 K 2024-12-12T05:40:47,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 28540e98a53f5d1213a72e3944e7527f in 1655ms, sequenceid=220, compaction requested=true 2024-12-12T05:40:47,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:47,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:47,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:47,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:47,657 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:47,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:47,657 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:47,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:47,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/A is initiating minor compaction (all files) 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:47,658 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/A in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/B is initiating minor compaction (all files) 2024-12-12T05:40:47,658 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1349880594db41a3bf9791631a40119a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=100.0 K 2024-12-12T05:40:47,658 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/B in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:47,658 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1349880594db41a3bf9791631a40119a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62] 2024-12-12T05:40:47,658 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/f7f21cb8fe5e4d3ea270f5fc62ba240d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/3d76198a0c744c44b24bb32925625020, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/ba12558c5cf74007b9b50d65f911c7e8] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=36.0 K 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f7f21cb8fe5e4d3ea270f5fc62ba240d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733982043262 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1349880594db41a3bf9791631a40119a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733982043262 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d76198a0c744c44b24bb32925625020, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733982043382 2024-12-12T05:40:47,658 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d89c915660d47169f4406178365d7ac, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733982043382 2024-12-12T05:40:47,659 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ba12558c5cf74007b9b50d65f911c7e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733982044528 2024-12-12T05:40:47,659 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting af29aab345e947b8923a5aa60d201f62, keycount=200, bloomtype=ROW, size=38.8 
K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733982044528 2024-12-12T05:40:47,668 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:47,670 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412126042cffebcbc47699e8cfeee286ffed9_28540e98a53f5d1213a72e3944e7527f store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:47,671 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412126042cffebcbc47699e8cfeee286ffed9_28540e98a53f5d1213a72e3944e7527f, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:47,671 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#B#compaction#331 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:47,671 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126042cffebcbc47699e8cfeee286ffed9_28540e98a53f5d1213a72e3944e7527f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:47,671 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/32acfc00c92e44fb8f192eadcf4d0bd4 is 50, key is test_row_0/B:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:47,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742222_1398 (size=12663) 2024-12-12T05:40:47,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742221_1397 (size=4469) 2024-12-12T05:40:47,699 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#A#compaction#330 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:47,699 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/9f8deb61b255433cb456df75ff582ec8 is 175, key is test_row_0/A:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:47,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742223_1399 (size=31617) 2024-12-12T05:40:47,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:47,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:47,802 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:47,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:47,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121203ef8048b3be43d598dd5d469f9a1542_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982046035/Put/seqid=0 2024-12-12T05:40:47,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742224_1400 (size=12304) 2024-12-12T05:40:48,097 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/32acfc00c92e44fb8f192eadcf4d0bd4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/32acfc00c92e44fb8f192eadcf4d0bd4 2024-12-12T05:40:48,101 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/B of 28540e98a53f5d1213a72e3944e7527f into 32acfc00c92e44fb8f192eadcf4d0bd4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:48,101 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:48,101 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/B, priority=13, startTime=1733982047657; duration=0sec 2024-12-12T05:40:48,102 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:48,102 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:B 2024-12-12T05:40:48,102 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:48,102 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:48,102 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/C is initiating minor compaction (all files) 2024-12-12T05:40:48,102 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/C in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:48,103 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/d72c3a1e3f1c49039b837ceb16c86849, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/95ad46cc01fb4c89bb2e50cf13fde8c4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/fcf227109a164fac8734ba9daaa8ea1e] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=36.0 K 2024-12-12T05:40:48,103 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting d72c3a1e3f1c49039b837ceb16c86849, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733982043262 2024-12-12T05:40:48,103 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 95ad46cc01fb4c89bb2e50cf13fde8c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733982043382 2024-12-12T05:40:48,103 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting fcf227109a164fac8734ba9daaa8ea1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733982044528 2024-12-12T05:40:48,112 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/9f8deb61b255433cb456df75ff582ec8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9f8deb61b255433cb456df75ff582ec8 2024-12-12T05:40:48,113 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#C#compaction#333 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:48,113 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/11927047ad994df897a54b351e141737 is 50, key is test_row_0/C:col10/1733982046000/Put/seqid=0 2024-12-12T05:40:48,116 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/A of 28540e98a53f5d1213a72e3944e7527f into 9f8deb61b255433cb456df75ff582ec8(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:40:48,116 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:48,116 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/A, priority=13, startTime=1733982047657; duration=0sec 2024-12-12T05:40:48,116 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:48,116 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:A 2024-12-12T05:40:48,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742225_1401 (size=12663) 2024-12-12T05:40:48,129 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/11927047ad994df897a54b351e141737 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/11927047ad994df897a54b351e141737 2024-12-12T05:40:48,132 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/C of 28540e98a53f5d1213a72e3944e7527f into 11927047ad994df897a54b351e141737(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:48,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:48,132 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/C, priority=13, startTime=1733982047657; duration=0sec 2024-12-12T05:40:48,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:48,132 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:C 2024-12-12T05:40:48,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:48,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
as already flushing 2024-12-12T05:40:48,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:48,218 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121203ef8048b3be43d598dd5d469f9a1542_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121203ef8048b3be43d598dd5d469f9a1542_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:48,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/c85bb2ecfbaf46da8abda431f5b0d44f, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:48,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/c85bb2ecfbaf46da8abda431f5b0d44f is 175, key is test_row_0/A:col10/1733982046035/Put/seqid=0 2024-12-12T05:40:48,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742226_1402 (size=31105) 2024-12-12T05:40:48,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982108233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982108337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:48,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982108540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,622 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/c85bb2ecfbaf46da8abda431f5b0d44f 2024-12-12T05:40:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/6b3f4c59f18f4f069f7a2c84bae695fb is 50, key is test_row_0/B:col10/1733982046035/Put/seqid=0 2024-12-12T05:40:48,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742227_1403 (size=12151) 2024-12-12T05:40:48,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44518 deadline: 1733982108669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,671 DEBUG [Thread-1527 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:48,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44468 deadline: 1733982108673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,676 DEBUG [Thread-1531 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:48,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44540 deadline: 1733982108675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,677 DEBUG [Thread-1525 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:48,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44558 deadline: 1733982108677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:48,680 DEBUG [Thread-1529 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:40:48,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:48,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982108843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:49,032 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/6b3f4c59f18f4f069f7a2c84bae695fb 2024-12-12T05:40:49,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/f6151b47284d4849915a57176b6fb3b4 is 50, key is test_row_0/C:col10/1733982046035/Put/seqid=0 2024-12-12T05:40:49,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742228_1404 (size=12151) 2024-12-12T05:40:49,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:49,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982109346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:49,440 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/f6151b47284d4849915a57176b6fb3b4 2024-12-12T05:40:49,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/c85bb2ecfbaf46da8abda431f5b0d44f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f 2024-12-12T05:40:49,446 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f, entries=150, sequenceid=234, filesize=30.4 K 2024-12-12T05:40:49,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/6b3f4c59f18f4f069f7a2c84bae695fb as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6b3f4c59f18f4f069f7a2c84bae695fb 2024-12-12T05:40:49,450 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6b3f4c59f18f4f069f7a2c84bae695fb, entries=150, sequenceid=234, filesize=11.9 K 2024-12-12T05:40:49,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/f6151b47284d4849915a57176b6fb3b4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/f6151b47284d4849915a57176b6fb3b4 2024-12-12T05:40:49,454 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/f6151b47284d4849915a57176b6fb3b4, entries=150, sequenceid=234, filesize=11.9 K 2024-12-12T05:40:49,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-12T05:40:49,454 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 28540e98a53f5d1213a72e3944e7527f in 1652ms, sequenceid=234, compaction requested=false 2024-12-12T05:40:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
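The block of RegionTooBusyException traces above is the expected write-pressure path of this test: the AcidGuaranteesTestTool writer threads keep calling HTable.put on 'TestAcidGuarantees' while the region's memstore sits above its 512.0 K blocking limit, and the client's RpcRetryingCallerImpl absorbs each rejection and retries (tries=6, retries=16 in the DEBUG lines) until the flush that finishes just above frees up space. The minimal Java sketch that follows shows an equivalent client-side put against the same table, row, and column family as the log; the class name and every configuration value in it are illustrative assumptions, not settings taken from the test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    // Client-side knobs bounding the RpcRetryingCallerImpl loop seen in the DEBUG
    // lines above (tries=6, retries=16). The values here are illustrative.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base pause in ms between retries

    // Server-side, the 512.0 K threshold in the messages is the region's memstore
    // flush size multiplied by hbase.hregion.memstore.block.multiplier; the test
    // presumably shrinks the flush size to hit it quickly. Illustrative values only:
    //   conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    //   conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks inside the client's retry loop; if the region stays over its memstore
      // limit past the retry budget, the RegionTooBusyException surfaces to the caller
      // as an IOException, which is what the writer threads in the log are handling.
      table.put(put);
    }
  }
}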
2024-12-12T05:40:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-12T05:40:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-12T05:40:49,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-12T05:40:49,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1750 sec 2024-12-12T05:40:49,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 3.1790 sec 2024-12-12T05:40:50,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:50,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:40:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:50,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129bf8aaf85d5046dcbc90c5101fe20941_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:50,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742229_1405 (size=14944) 2024-12-12T05:40:50,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T05:40:50,383 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-12T05:40:50,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:50,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-12T05:40:50,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:50,385 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:50,385 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:50,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:50,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:50,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982110407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:50,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:50,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982110512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:50,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:50,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:50,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:50,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:50,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
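From 05:40:50 onward the log interleaves a second client-requested flush: the master stores FlushTableProcedure pid=114, dispatches FlushRegionProcedure pid=115 to the region server, and the server rejects it with "NOT flushing ... as already flushing", reporting the IOException back so the master can re-dispatch it once the in-flight memstore flush completes. For orientation, the short sketch below shows the kind of Admin call ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") that starts such a procedure; it is illustrative only, with the class name and connection setup assumed rather than taken from the test code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the call returns once the
      // table-level procedure is reported complete, which is what the
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: ... completed"
      // lines above correspond to. A region that is "already flushing" is retried by
      // the master-side procedure rather than surfacing as an error here.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}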
2024-12-12T05:40:50,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:50,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982110715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,766 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:50,769 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129bf8aaf85d5046dcbc90c5101fe20941_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129bf8aaf85d5046dcbc90c5101fe20941_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:50,770 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ff6ca012bddb48c7931b64cabb594a50, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:50,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ff6ca012bddb48c7931b64cabb594a50 is 175, key is test_row_0/A:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742230_1406 (size=39899) 2024-12-12T05:40:50,841 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:50,993 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:50,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:50,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:50,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:51,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982111019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,145 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:51,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:51,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:51,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,181 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=260, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ff6ca012bddb48c7931b64cabb594a50 2024-12-12T05:40:51,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/07f9aa0e92704e809fb8727e6b3f9b9a is 50, key is test_row_0/B:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:51,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742231_1407 (size=12251) 2024-12-12T05:40:51,297 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:51,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,353 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e3f54cb8145a46b5831b6c662ce2af7b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d1e6ac131a084b7d9a30b50250c7bbe4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ab25394924ea4bb7a554091a31cf0467, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1349880594db41a3bf9791631a40119a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62] to archive 2024-12-12T05:40:51,354 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:51,356 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e0ce7d27bc504b64b04a7b0a6ef1516a 2024-12-12T05:40:51,356 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/41ead117df4246d7831fc859db2c99a4 2024-12-12T05:40:51,357 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/4ce3ff16c2724a289b60554c5ed9f8d2 2024-12-12T05:40:51,357 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9fcc26504ca549358c4bf878e358e99a 2024-12-12T05:40:51,357 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d1e6ac131a084b7d9a30b50250c7bbe4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d1e6ac131a084b7d9a30b50250c7bbe4 2024-12-12T05:40:51,357 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1b0d8a0fe65c4c4480a3b491339780a7 2024-12-12T05:40:51,357 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/8ee55d90f0f44dfa9fda1cb5b13304ee 2024-12-12T05:40:51,357 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e3f54cb8145a46b5831b6c662ce2af7b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/e3f54cb8145a46b5831b6c662ce2af7b 2024-12-12T05:40:51,358 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/d0133791ff3443fa97f3ed8357c58c01 2024-12-12T05:40:51,358 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ab25394924ea4bb7a554091a31cf0467 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ab25394924ea4bb7a554091a31cf0467 2024-12-12T05:40:51,358 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/5658c2d97fff4c6fbfdd292de029bed3 2024-12-12T05:40:51,358 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ba9d4dc4ef8a458ea13cc327cd1bd7ac 2024-12-12T05:40:51,359 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/7d89c915660d47169f4406178365d7ac 2024-12-12T05:40:51,359 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1349880594db41a3bf9791631a40119a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/1349880594db41a3bf9791631a40119a 2024-12-12T05:40:51,359 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/af29aab345e947b8923a5aa60d201f62 2024-12-12T05:40:51,362 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/43efc29309b949598dda677f14cea6bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/711a419f5bca44649a94f1d8453a5d39, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/214a56f00b334dd7bdf3829ced0193fa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/c026212de3f348638edd41a64bdc0d8b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/0a766f3e1c6d415d980e37d7451d97b4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/14d277a65f5b4a5cba952ec0597c3c32, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/e2a9b61a35954e7fb0988e41952407bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/53f107ab2ad045da85b27ba9f3df217c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/46cb365f7760463783d8df1098e44019, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1edcd1921d434d1f80927f01ee76b82d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/9822f646fc3141d997a4b10d59b31e4a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/f7f21cb8fe5e4d3ea270f5fc62ba240d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1267ed261f52422cbef251f44c46d307, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/3d76198a0c744c44b24bb32925625020, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/ba12558c5cf74007b9b50d65f911c7e8] to archive 2024-12-12T05:40:51,362 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:51,365 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/711a419f5bca44649a94f1d8453a5d39 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/711a419f5bca44649a94f1d8453a5d39 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/214a56f00b334dd7bdf3829ced0193fa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/214a56f00b334dd7bdf3829ced0193fa 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/43efc29309b949598dda677f14cea6bb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/43efc29309b949598dda677f14cea6bb 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/c026212de3f348638edd41a64bdc0d8b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/c026212de3f348638edd41a64bdc0d8b 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/0a766f3e1c6d415d980e37d7451d97b4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/0a766f3e1c6d415d980e37d7451d97b4 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/e2a9b61a35954e7fb0988e41952407bb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/e2a9b61a35954e7fb0988e41952407bb 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/53f107ab2ad045da85b27ba9f3df217c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/53f107ab2ad045da85b27ba9f3df217c 2024-12-12T05:40:51,365 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/14d277a65f5b4a5cba952ec0597c3c32 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/14d277a65f5b4a5cba952ec0597c3c32 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/9822f646fc3141d997a4b10d59b31e4a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/9822f646fc3141d997a4b10d59b31e4a 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1edcd1921d434d1f80927f01ee76b82d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1edcd1921d434d1f80927f01ee76b82d 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/f7f21cb8fe5e4d3ea270f5fc62ba240d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/f7f21cb8fe5e4d3ea270f5fc62ba240d 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1267ed261f52422cbef251f44c46d307 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/1267ed261f52422cbef251f44c46d307 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/ba12558c5cf74007b9b50d65f911c7e8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/ba12558c5cf74007b9b50d65f911c7e8 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/46cb365f7760463783d8df1098e44019 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/46cb365f7760463783d8df1098e44019 2024-12-12T05:40:51,367 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/3d76198a0c744c44b24bb32925625020 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/3d76198a0c744c44b24bb32925625020 2024-12-12T05:40:51,371 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/1050a2343c7b47d6acfa51ad9f7c39d3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/677658c166824a6e94e516ca4926a79e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/197e0becccf5483ab4ab699278d302d5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7e20e3637ca54ab180c85e4d759a5e3f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/9a0e61227c3f4a9a82748bdf469e7cf2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/e241ccd1b5464c26bf8831d1e5b1a4e2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/66ce99667eed425287e27636e9e77464, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/2e16b26fa61a43a997d100014d6d8dd9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/09f71ba085204133a088c1355a81a20a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/61670084a1214e2aa67d10ccdf185c71, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7f357cebaa1f48c592d7cc897c0894df, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/d72c3a1e3f1c49039b837ceb16c86849, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/b9a26a98a5404aedae9567f279e2175c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/95ad46cc01fb4c89bb2e50cf13fde8c4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/fcf227109a164fac8734ba9daaa8ea1e] to archive 2024-12-12T05:40:51,372 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/83e80bf221ca:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:51,374 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/1050a2343c7b47d6acfa51ad9f7c39d3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/1050a2343c7b47d6acfa51ad9f7c39d3 2024-12-12T05:40:51,374 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/e241ccd1b5464c26bf8831d1e5b1a4e2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/e241ccd1b5464c26bf8831d1e5b1a4e2 2024-12-12T05:40:51,374 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/677658c166824a6e94e516ca4926a79e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/677658c166824a6e94e516ca4926a79e 2024-12-12T05:40:51,374 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7e20e3637ca54ab180c85e4d759a5e3f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7e20e3637ca54ab180c85e4d759a5e3f 2024-12-12T05:40:51,375 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/9a0e61227c3f4a9a82748bdf469e7cf2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/9a0e61227c3f4a9a82748bdf469e7cf2 2024-12-12T05:40:51,375 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/197e0becccf5483ab4ab699278d302d5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/197e0becccf5483ab4ab699278d302d5 2024-12-12T05:40:51,375 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/66ce99667eed425287e27636e9e77464 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/66ce99667eed425287e27636e9e77464 2024-12-12T05:40:51,375 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/2e16b26fa61a43a997d100014d6d8dd9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/2e16b26fa61a43a997d100014d6d8dd9 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/95ad46cc01fb4c89bb2e50cf13fde8c4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/95ad46cc01fb4c89bb2e50cf13fde8c4 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7f357cebaa1f48c592d7cc897c0894df to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/7f357cebaa1f48c592d7cc897c0894df 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/d72c3a1e3f1c49039b837ceb16c86849 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/d72c3a1e3f1c49039b837ceb16c86849 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/b9a26a98a5404aedae9567f279e2175c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/b9a26a98a5404aedae9567f279e2175c 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/09f71ba085204133a088c1355a81a20a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/09f71ba085204133a088c1355a81a20a 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/61670084a1214e2aa67d10ccdf185c71 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/61670084a1214e2aa67d10ccdf185c71 2024-12-12T05:40:51,376 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/fcf227109a164fac8734ba9daaa8ea1e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/fcf227109a164fac8734ba9daaa8ea1e 2024-12-12T05:40:51,449 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:51,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:51,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:51,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:51,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:51,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44524 deadline: 1733982111522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/07f9aa0e92704e809fb8727e6b3f9b9a 2024-12-12T05:40:51,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/36c88437842c4102b159b1fa3e467f3b is 50, key is test_row_0/C:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:51,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742232_1408 (size=12251) 2024-12-12T05:40:51,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:51,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:51,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:51,749 DEBUG [Thread-1540 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x627cad17 to 127.0.0.1:60303 2024-12-12T05:40:51,749 DEBUG [Thread-1540 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:51,751 DEBUG [Thread-1538 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787e5169 to 127.0.0.1:60303 2024-12-12T05:40:51,751 DEBUG [Thread-1538 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:51,751 DEBUG [Thread-1542 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39387e4d to 127.0.0.1:60303 2024-12-12T05:40:51,751 DEBUG [Thread-1542 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:51,752 DEBUG [Thread-1536 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:60303 2024-12-12T05:40:51,752 DEBUG [Thread-1536 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:51,753 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:51,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,755 DEBUG [Thread-1544 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x238db126 to 127.0.0.1:60303 2024-12-12T05:40:51,755 DEBUG [Thread-1544 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:51,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:51,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. as already flushing 2024-12-12T05:40:51,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:51,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:51,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:40:52,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/36c88437842c4102b159b1fa3e467f3b 2024-12-12T05:40:52,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/ff6ca012bddb48c7931b64cabb594a50 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50 2024-12-12T05:40:52,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50, entries=200, sequenceid=260, filesize=39.0 K 2024-12-12T05:40:52,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/07f9aa0e92704e809fb8727e6b3f9b9a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/07f9aa0e92704e809fb8727e6b3f9b9a 2024-12-12T05:40:52,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/07f9aa0e92704e809fb8727e6b3f9b9a, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T05:40:52,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/36c88437842c4102b159b1fa3e467f3b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/36c88437842c4102b159b1fa3e467f3b 2024-12-12T05:40:52,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/36c88437842c4102b159b1fa3e467f3b, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T05:40:52,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 28540e98a53f5d1213a72e3944e7527f in 1663ms, sequenceid=260, compaction requested=true 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:52,021 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 28540e98a53f5d1213a72e3944e7527f:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:40:52,021 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:52,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:52,022 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:52,022 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:52,022 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/A is initiating minor compaction (all files) 2024-12-12T05:40:52,022 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/B is initiating minor compaction (all files) 2024-12-12T05:40:52,022 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/A in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:52,022 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/B in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:52,022 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/32acfc00c92e44fb8f192eadcf4d0bd4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6b3f4c59f18f4f069f7a2c84bae695fb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/07f9aa0e92704e809fb8727e6b3f9b9a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=36.2 K 2024-12-12T05:40:52,022 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9f8deb61b255433cb456df75ff582ec8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=100.2 K 2024-12-12T05:40:52,022 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9f8deb61b255433cb456df75ff582ec8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50] 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 32acfc00c92e44fb8f192eadcf4d0bd4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733982044528 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f8deb61b255433cb456df75ff582ec8, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733982044528 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b3f4c59f18f4f069f7a2c84bae695fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733982046027 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c85bb2ecfbaf46da8abda431f5b0d44f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733982046027 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 07f9aa0e92704e809fb8727e6b3f9b9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1733982048226 2024-12-12T05:40:52,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff6ca012bddb48c7931b64cabb594a50, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1733982048226 2024-12-12T05:40:52,027 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:52,028 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#B#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:52,028 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/b79872f234954b6a98be7c5b47a62ad3 is 50, key is test_row_0/B:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:52,029 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412124d09e28e5cb945c486467ea7c8d3c01d_28540e98a53f5d1213a72e3944e7527f store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:52,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742233_1409 (size=12354) 2024-12-12T05:40:52,032 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412124d09e28e5cb945c486467ea7c8d3c01d_28540e98a53f5d1213a72e3944e7527f, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:52,033 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124d09e28e5cb945c486467ea7c8d3c01d_28540e98a53f5d1213a72e3944e7527f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:52,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742234_1410 (size=4469) 2024-12-12T05:40:52,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:52,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:52,060 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:52,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c87b44b06c854f7a80cf93b188ad66be_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982050401/Put/seqid=0 2024-12-12T05:40:52,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742235_1411 (size=12454) 2024-12-12T05:40:52,439 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#A#compaction#340 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:52,440 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/86ef878bfa3c4cf1b6c652acf49b11a5 is 175, key is test_row_0/A:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:52,442 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/b79872f234954b6a98be7c5b47a62ad3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/b79872f234954b6a98be7c5b47a62ad3 2024-12-12T05:40:52,447 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/B of 28540e98a53f5d1213a72e3944e7527f into b79872f234954b6a98be7c5b47a62ad3(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:52,447 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:52,447 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/B, priority=13, startTime=1733982052021; duration=0sec 2024-12-12T05:40:52,447 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:40:52,447 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:B 2024-12-12T05:40:52,447 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:40:52,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742236_1412 (size=31308) 2024-12-12T05:40:52,449 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:40:52,449 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 28540e98a53f5d1213a72e3944e7527f/C is initiating minor compaction (all files) 2024-12-12T05:40:52,449 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 28540e98a53f5d1213a72e3944e7527f/C in TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
2024-12-12T05:40:52,449 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/11927047ad994df897a54b351e141737, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/f6151b47284d4849915a57176b6fb3b4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/36c88437842c4102b159b1fa3e467f3b] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp, totalSize=36.2 K 2024-12-12T05:40:52,449 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 11927047ad994df897a54b351e141737, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733982044528 2024-12-12T05:40:52,449 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f6151b47284d4849915a57176b6fb3b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733982046027 2024-12-12T05:40:52,450 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 36c88437842c4102b159b1fa3e467f3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1733982048226 2024-12-12T05:40:52,455 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 28540e98a53f5d1213a72e3944e7527f#C#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:40:52,455 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/ab1869050d5246689f00e62f71606032 is 50, key is test_row_0/C:col10/1733982050357/Put/seqid=0 2024-12-12T05:40:52,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742237_1413 (size=12354) 2024-12-12T05:40:52,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:52,471 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c87b44b06c854f7a80cf93b188ad66be_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c87b44b06c854f7a80cf93b188ad66be_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:52,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/2c824685863a4595bb7d703e2190fcb3, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:52,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/2c824685863a4595bb7d703e2190fcb3 is 175, key is test_row_0/A:col10/1733982050401/Put/seqid=0 2024-12-12T05:40:52,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742238_1414 (size=31255) 2024-12-12T05:40:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:52,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
as already flushing 2024-12-12T05:40:52,528 DEBUG [Thread-1533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:60303 2024-12-12T05:40:52,528 DEBUG [Thread-1533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:52,679 DEBUG [Thread-1527 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:60303 2024-12-12T05:40:52,679 DEBUG [Thread-1527 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:52,696 DEBUG [Thread-1525 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:60303 2024-12-12T05:40:52,696 DEBUG [Thread-1525 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:52,699 DEBUG [Thread-1529 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:60303 2024-12-12T05:40:52,700 DEBUG [Thread-1529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:52,706 DEBUG [Thread-1531 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:60303 2024-12-12T05:40:52,706 DEBUG [Thread-1531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:52,859 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/86ef878bfa3c4cf1b6c652acf49b11a5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/86ef878bfa3c4cf1b6c652acf49b11a5 2024-12-12T05:40:52,864 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/A of 28540e98a53f5d1213a72e3944e7527f into 86ef878bfa3c4cf1b6c652acf49b11a5(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
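Note: the PressureAwareThroughputController lines above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") show compaction writes being throttled against a bytes-per-second budget. A minimal sketch of that style of control loop follows; the class name, the fixed limit, and the sleep granularity are assumptions for illustration rather than the actual controller logic.

public class ThroughputThrottleSketch {

    private final double bytesPerSecondLimit;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    public ThroughputThrottleSketch(double bytesPerSecondLimit) {
        this.bytesPerSecondLimit = bytesPerSecondLimit;
    }

    /** Record a write and sleep just long enough to stay under the limit. */
    public synchronized void control(long bytesWritten) throws InterruptedException {
        bytesInWindow += bytesWritten;
        double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
        double earliestAllowedSeconds = bytesInWindow / bytesPerSecondLimit;
        double sleepSeconds = earliestAllowedSeconds - elapsedSeconds;
        if (sleepSeconds > 0) {
            Thread.sleep((long) (sleepSeconds * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s budget, like the "total limit is 50.00 MB/second" entries above.
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            throttle.control(8 * 1024 * 1024); // pretend we wrote an 8 MB block
        }
        System.out.println("done");
    }
}

Each simulated 8 MB write calls control(), which sleeps only when the cumulative write rate would otherwise exceed the 50 MB/s budget.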
2024-12-12T05:40:52,864 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:52,864 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/ab1869050d5246689f00e62f71606032 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/ab1869050d5246689f00e62f71606032 2024-12-12T05:40:52,864 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/A, priority=13, startTime=1733982052021; duration=0sec 2024-12-12T05:40:52,864 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:52,864 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:A 2024-12-12T05:40:52,869 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 28540e98a53f5d1213a72e3944e7527f/C of 28540e98a53f5d1213a72e3944e7527f into ab1869050d5246689f00e62f71606032(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:40:52,869 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:52,869 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f., storeName=28540e98a53f5d1213a72e3944e7527f/C, priority=13, startTime=1733982052021; duration=0sec 2024-12-12T05:40:52,869 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:40:52,869 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 28540e98a53f5d1213a72e3944e7527f:C 2024-12-12T05:40:52,876 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/2c824685863a4595bb7d703e2190fcb3 2024-12-12T05:40:52,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/6f11f03807954fa190cc80a16b61e8c8 is 50, key is test_row_0/B:col10/1733982050401/Put/seqid=0 2024-12-12T05:40:52,888 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742239_1415 (size=12301) 2024-12-12T05:40:53,290 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/6f11f03807954fa190cc80a16b61e8c8 2024-12-12T05:40:53,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/40dc0b0482724f43bc19ea09634f4cf9 is 50, key is test_row_0/C:col10/1733982050401/Put/seqid=0 2024-12-12T05:40:53,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742240_1416 (size=12301) 2024-12-12T05:40:53,710 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/40dc0b0482724f43bc19ea09634f4cf9 2024-12-12T05:40:53,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/2c824685863a4595bb7d703e2190fcb3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/2c824685863a4595bb7d703e2190fcb3 2024-12-12T05:40:53,717 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/2c824685863a4595bb7d703e2190fcb3, entries=150, sequenceid=273, filesize=30.5 K 2024-12-12T05:40:53,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/6f11f03807954fa190cc80a16b61e8c8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6f11f03807954fa190cc80a16b61e8c8 2024-12-12T05:40:53,722 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6f11f03807954fa190cc80a16b61e8c8, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T05:40:53,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 
{event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/40dc0b0482724f43bc19ea09634f4cf9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/40dc0b0482724f43bc19ea09634f4cf9 2024-12-12T05:40:53,728 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/40dc0b0482724f43bc19ea09634f4cf9, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T05:40:53,728 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=33.54 KB/34350 for 28540e98a53f5d1213a72e3944e7527f in 1668ms, sequenceid=273, compaction requested=false 2024-12-12T05:40:53,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:53,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:53,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-12T05:40:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-12T05:40:53,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-12T05:40:53,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3440 sec 2024-12-12T05:40:53,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 3.3460 sec 2024-12-12T05:40:54,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T05:40:54,490 INFO [Thread-1535 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3243 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9729 rows 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3228 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9684 rows 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3236 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9708 rows 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3228 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9684 rows 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3236 2024-12-12T05:40:54,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9708 rows 2024-12-12T05:40:54,492 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:40:54,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:60303 2024-12-12T05:40:54,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:40:54,497 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T05:40:54,498 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T05:40:54,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:54,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T05:40:54,502 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982054501"}]},"ts":"1733982054501"} 2024-12-12T05:40:54,502 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T05:40:54,509 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T05:40:54,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:40:54,510 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, UNASSIGN}] 2024-12-12T05:40:54,511 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, UNASSIGN 2024-12-12T05:40:54,511 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:54,512 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:40:54,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; CloseRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:54,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T05:40:54,663 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:54,664 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:54,664 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:40:54,665 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 28540e98a53f5d1213a72e3944e7527f, disabling compactions & flushes 2024-12-12T05:40:54,665 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:54,665 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 2024-12-12T05:40:54,665 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. after waiting 0 ms 2024-12-12T05:40:54,665 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
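Note: the close sequence above ("disabling compactions & flushes", "Waiting without time limit for close lock", "Acquired close lock ... Updates disabled for region") follows a familiar pattern: block new writers, flush whatever is still in memory, then mark the region closed. The sketch below loosely mirrors that pattern with a ReentrantReadWriteLock; the names and sizes are hypothetical and this is not the HRegion implementation.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class RegionCloseSketch {

    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean closed = false;
    private long memstoreBytes = 34_350; // pretend pending edits, like the ~33.54 KB above

    /** Writers take the read side of the lock so many can run until close begins. */
    public void put(long bytes) {
        closeLock.readLock().lock();
        try {
            if (closed) {
                throw new IllegalStateException("region is closing");
            }
            memstoreBytes += bytes;
        } finally {
            closeLock.readLock().unlock();
        }
    }

    /** Close takes the write side: block new updates, flush what is left, mark closed. */
    public void close() {
        closeLock.writeLock().lock(); // "Acquired close lock ... Updates disabled"
        try {
            flushRemaining();
            closed = true;
        } finally {
            closeLock.writeLock().unlock();
        }
    }

    private void flushRemaining() {
        System.out.println("flushing " + memstoreBytes + " bytes before close");
        memstoreBytes = 0;
    }

    public static void main(String[] args) {
        RegionCloseSketch region = new RegionCloseSketch();
        region.put(1024);
        region.close();
    }
}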
2024-12-12T05:40:54,665 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 28540e98a53f5d1213a72e3944e7527f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T05:40:54,665 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=A 2024-12-12T05:40:54,666 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:54,666 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=B 2024-12-12T05:40:54,666 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:54,666 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 28540e98a53f5d1213a72e3944e7527f, store=C 2024-12-12T05:40:54,666 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:54,675 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121264a05adf7e0c4ee2910eee0fef7c6e3c_28540e98a53f5d1213a72e3944e7527f is 50, key is test_row_0/A:col10/1733982052678/Put/seqid=0 2024-12-12T05:40:54,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742241_1417 (size=12454) 2024-12-12T05:40:54,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T05:40:55,080 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:55,090 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121264a05adf7e0c4ee2910eee0fef7c6e3c_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121264a05adf7e0c4ee2910eee0fef7c6e3c_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:55,091 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/de99dd9beb9a4817bf1b973146849a16, store: [table=TestAcidGuarantees family=A region=28540e98a53f5d1213a72e3944e7527f] 2024-12-12T05:40:55,092 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/de99dd9beb9a4817bf1b973146849a16 is 175, key is test_row_0/A:col10/1733982052678/Put/seqid=0 2024-12-12T05:40:55,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742242_1418 (size=31255) 2024-12-12T05:40:55,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T05:40:55,498 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=284, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/de99dd9beb9a4817bf1b973146849a16 2024-12-12T05:40:55,511 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/26c586dc3e1c4827b7963cef4f5d1d0c is 50, key is test_row_0/B:col10/1733982052678/Put/seqid=0 2024-12-12T05:40:55,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742243_1419 (size=12301) 2024-12-12T05:40:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T05:40:55,917 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/26c586dc3e1c4827b7963cef4f5d1d0c 2024-12-12T05:40:55,929 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/91dc585b8ffe4946a9132899bd16efc9 is 50, key is test_row_0/C:col10/1733982052678/Put/seqid=0 2024-12-12T05:40:55,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742244_1420 (size=12301) 2024-12-12T05:40:56,334 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=284 (bloomFilter=true), 
to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/91dc585b8ffe4946a9132899bd16efc9 2024-12-12T05:40:56,344 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/A/de99dd9beb9a4817bf1b973146849a16 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/de99dd9beb9a4817bf1b973146849a16 2024-12-12T05:40:56,351 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/de99dd9beb9a4817bf1b973146849a16, entries=150, sequenceid=284, filesize=30.5 K 2024-12-12T05:40:56,352 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/B/26c586dc3e1c4827b7963cef4f5d1d0c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/26c586dc3e1c4827b7963cef4f5d1d0c 2024-12-12T05:40:56,355 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/26c586dc3e1c4827b7963cef4f5d1d0c, entries=150, sequenceid=284, filesize=12.0 K 2024-12-12T05:40:56,356 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/.tmp/C/91dc585b8ffe4946a9132899bd16efc9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/91dc585b8ffe4946a9132899bd16efc9 2024-12-12T05:40:56,359 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/91dc585b8ffe4946a9132899bd16efc9, entries=150, sequenceid=284, filesize=12.0 K 2024-12-12T05:40:56,360 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 28540e98a53f5d1213a72e3944e7527f in 1695ms, sequenceid=284, compaction requested=true 2024-12-12T05:40:56,361 DEBUG [StoreCloser-TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9f8deb61b255433cb456df75ff582ec8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50] to archive 2024-12-12T05:40:56,361 DEBUG [StoreCloser-TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:40:56,363 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/ff6ca012bddb48c7931b64cabb594a50 2024-12-12T05:40:56,363 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9f8deb61b255433cb456df75ff582ec8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/9f8deb61b255433cb456df75ff582ec8 2024-12-12T05:40:56,364 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/c85bb2ecfbaf46da8abda431f5b0d44f 2024-12-12T05:40:56,365 DEBUG [StoreCloser-TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/32acfc00c92e44fb8f192eadcf4d0bd4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6b3f4c59f18f4f069f7a2c84bae695fb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/07f9aa0e92704e809fb8727e6b3f9b9a] to archive 2024-12-12T05:40:56,365 DEBUG [StoreCloser-TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:40:56,367 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6b3f4c59f18f4f069f7a2c84bae695fb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6b3f4c59f18f4f069f7a2c84bae695fb 2024-12-12T05:40:56,367 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/32acfc00c92e44fb8f192eadcf4d0bd4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/32acfc00c92e44fb8f192eadcf4d0bd4 2024-12-12T05:40:56,367 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/07f9aa0e92704e809fb8727e6b3f9b9a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/07f9aa0e92704e809fb8727e6b3f9b9a 2024-12-12T05:40:56,368 DEBUG [StoreCloser-TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/11927047ad994df897a54b351e141737, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/f6151b47284d4849915a57176b6fb3b4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/36c88437842c4102b159b1fa3e467f3b] to archive 2024-12-12T05:40:56,369 DEBUG [StoreCloser-TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
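Note: the HFileArchiver entries above retire compacted store files by renaming them into a parallel archive/ tree instead of deleting them outright. The sketch below shows that move with the standard Hadoop FileSystem API, assuming the store and archive directories live on the same filesystem; the paths in main() are hypothetical placeholders, not the hdfs:// paths from this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveCompactedFilesSketch {

    /** Move each compacted file into the mirrored archive directory. */
    static void archive(FileSystem fs, Path archiveDir, Path... compactedFiles) throws IOException {
        fs.mkdirs(archiveDir);
        for (Path file : compactedFiles) {
            Path target = new Path(archiveDir, file.getName());
            if (!fs.rename(file, target)) {
                throw new IOException("Failed to archive " + file + " to " + target);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical layout mirroring the data/ -> archive/ move seen in the log.
        Path storeDir = new Path("/data/default/SomeTable/region/C");
        Path archiveDir = new Path("/archive/data/default/SomeTable/region/C");
        archive(fs, archiveDir, new Path(storeDir, "file1"), new Path(storeDir, "file2"));
    }
}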
2024-12-12T05:40:56,371 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/11927047ad994df897a54b351e141737 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/11927047ad994df897a54b351e141737 2024-12-12T05:40:56,371 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/36c88437842c4102b159b1fa3e467f3b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/36c88437842c4102b159b1fa3e467f3b 2024-12-12T05:40:56,371 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/f6151b47284d4849915a57176b6fb3b4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/f6151b47284d4849915a57176b6fb3b4 2024-12-12T05:40:56,374 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/recovered.edits/287.seqid, newMaxSeqId=287, maxSeqId=4 2024-12-12T05:40:56,375 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f. 
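Note: the recurring "Checking to see if procedure is done pid=..." lines come from the client polling the master until a procedure (flush, disable, delete) finishes. A generic poll-until-done loop of that shape, with hypothetical names and timings, might look like this:

import java.util.function.BooleanSupplier;

public class ProcedurePollSketch {

    /** Poll until done() returns true or the timeout elapses. */
    static boolean waitForProcedure(BooleanSupplier done, long timeoutMillis, long pollMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            if (done.getAsBoolean()) {
                return true;
            }
            Thread.sleep(pollMillis);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        // Pretend the procedure completes after roughly two seconds.
        boolean ok = waitForProcedure(() -> System.currentTimeMillis() - start > 2000, 10_000, 200);
        System.out.println("procedure done: " + ok);
    }
}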
2024-12-12T05:40:56,375 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 28540e98a53f5d1213a72e3944e7527f: 2024-12-12T05:40:56,376 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,376 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=28540e98a53f5d1213a72e3944e7527f, regionState=CLOSED 2024-12-12T05:40:56,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-12T05:40:56,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseRegionProcedure 28540e98a53f5d1213a72e3944e7527f, server=83e80bf221ca,46457,1733981928566 in 1.8650 sec 2024-12-12T05:40:56,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-12T05:40:56,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=28540e98a53f5d1213a72e3944e7527f, UNASSIGN in 1.8680 sec 2024-12-12T05:40:56,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-12T05:40:56,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8700 sec 2024-12-12T05:40:56,381 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982056381"}]},"ts":"1733982056381"} 2024-12-12T05:40:56,382 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T05:40:56,418 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T05:40:56,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9210 sec 2024-12-12T05:40:56,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T05:40:56,608 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-12T05:40:56,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T05:40:56,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,614 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,616 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=120, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,616 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T05:40:56,619 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,623 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/recovered.edits] 2024-12-12T05:40:56,630 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/86ef878bfa3c4cf1b6c652acf49b11a5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/86ef878bfa3c4cf1b6c652acf49b11a5 2024-12-12T05:40:56,631 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/2c824685863a4595bb7d703e2190fcb3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/2c824685863a4595bb7d703e2190fcb3 2024-12-12T05:40:56,631 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/de99dd9beb9a4817bf1b973146849a16 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/A/de99dd9beb9a4817bf1b973146849a16 2024-12-12T05:40:56,634 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/26c586dc3e1c4827b7963cef4f5d1d0c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/26c586dc3e1c4827b7963cef4f5d1d0c 2024-12-12T05:40:56,634 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/b79872f234954b6a98be7c5b47a62ad3 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/b79872f234954b6a98be7c5b47a62ad3 2024-12-12T05:40:56,635 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6f11f03807954fa190cc80a16b61e8c8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/B/6f11f03807954fa190cc80a16b61e8c8 2024-12-12T05:40:56,638 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/40dc0b0482724f43bc19ea09634f4cf9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/40dc0b0482724f43bc19ea09634f4cf9 2024-12-12T05:40:56,638 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/91dc585b8ffe4946a9132899bd16efc9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/91dc585b8ffe4946a9132899bd16efc9 2024-12-12T05:40:56,639 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/ab1869050d5246689f00e62f71606032 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/C/ab1869050d5246689f00e62f71606032 2024-12-12T05:40:56,643 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/recovered.edits/287.seqid to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f/recovered.edits/287.seqid 2024-12-12T05:40:56,644 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,644 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T05:40:56,645 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T05:40:56,646 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T05:40:56,657 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121203ef8048b3be43d598dd5d469f9a1542_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121203ef8048b3be43d598dd5d469f9a1542_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,657 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120fcc4432b2594908bf7837652d8bbc7f_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120fcc4432b2594908bf7837652d8bbc7f_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,657 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121227b05fe861ff4356b92723f9ab336246_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121227b05fe861ff4356b92723f9ab336246_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,657 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123000542e5e6347c9bd8c4b062ff0316c_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123000542e5e6347c9bd8c4b062ff0316c_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,658 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124f1589e93f41460698482ee9f37f6f4f_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124f1589e93f41460698482ee9f37f6f4f_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,658 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121264a05adf7e0c4ee2910eee0fef7c6e3c_28540e98a53f5d1213a72e3944e7527f to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121264a05adf7e0c4ee2910eee0fef7c6e3c_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,658 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121267f437459fb04f37a4e848aa034c46af_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121267f437459fb04f37a4e848aa034c46af_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,658 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212723df5f454db45d09ed86d97a811e8ec_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212723df5f454db45d09ed86d97a811e8ec_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,660 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127eea6b74db114c70b5ff7d90500050bc_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127eea6b74db114c70b5ff7d90500050bc_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,661 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129bf8aaf85d5046dcbc90c5101fe20941_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129bf8aaf85d5046dcbc90c5101fe20941_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,661 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b20051aa4603454898b8ee38855a5fa9_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b20051aa4603454898b8ee38855a5fa9_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,661 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a3d4791609dc4b50abd057911dee43e9_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a3d4791609dc4b50abd057911dee43e9_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,661 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c87b44b06c854f7a80cf93b188ad66be_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c87b44b06c854f7a80cf93b188ad66be_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,661 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f931b9178e6649f2867b63a4bc88617b_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f931b9178e6649f2867b63a4bc88617b_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,661 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cc2ed61ec73a4a39b5d9c1b2fe5c5e90_28540e98a53f5d1213a72e3944e7527f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cc2ed61ec73a4a39b5d9c1b2fe5c5e90_28540e98a53f5d1213a72e3944e7527f 2024-12-12T05:40:56,662 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T05:40:56,664 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=120, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,665 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T05:40:56,666 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T05:40:56,667 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=120, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,667 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-12T05:40:56,667 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733982056667"}]},"ts":"9223372036854775807"} 2024-12-12T05:40:56,669 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T05:40:56,669 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 28540e98a53f5d1213a72e3944e7527f, NAME => 'TestAcidGuarantees,,1733982028428.28540e98a53f5d1213a72e3944e7527f.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T05:40:56,669 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T05:40:56,669 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733982056669"}]},"ts":"9223372036854775807"} 2024-12-12T05:40:56,670 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T05:40:56,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T05:40:56,735 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=120, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,737 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 125 msec 2024-12-12T05:40:56,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T05:40:56,920 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-12T05:40:56,935 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245 (was 245), OpenFileDescriptor=446 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=259 (was 290), ProcessCount=11 (was 11), AvailableMemoryMB=13255 (was 13280) 2024-12-12T05:40:56,944 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=245, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=259, ProcessCount=11, AvailableMemoryMB=13255 2024-12-12T05:40:56,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T05:40:56,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:40:56,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T05:40:56,947 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:40:56,947 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:56,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 121 2024-12-12T05:40:56,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-12T05:40:56,948 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:40:56,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742245_1421 (size=963) 2024-12-12T05:40:57,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-12T05:40:57,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-12T05:40:57,359 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:40:57,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742246_1422 (size=53) 2024-12-12T05:40:57,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-12T05:40:57,791 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:40:57,792 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ec02d2a34ba48290fed943fe35718728, disabling compactions & flushes 2024-12-12T05:40:57,792 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:57,792 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:57,792 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. after waiting 0 ms 2024-12-12T05:40:57,792 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:57,792 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:40:57,792 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:40:57,793 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:40:57,794 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733982057793"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733982057793"}]},"ts":"1733982057793"} 2024-12-12T05:40:57,796 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:40:57,797 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:40:57,797 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982057797"}]},"ts":"1733982057797"} 2024-12-12T05:40:57,799 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T05:40:57,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, ASSIGN}] 2024-12-12T05:40:57,845 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, ASSIGN 2024-12-12T05:40:57,846 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:40:57,997 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=ec02d2a34ba48290fed943fe35718728, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:58,000 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; OpenRegionProcedure ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:40:58,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-12T05:40:58,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:58,159 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:40:58,159 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7285): Opening region: {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:40:58,160 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,160 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:40:58,160 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7327): checking encryption for ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,160 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7330): checking classloading for ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,162 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,164 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:58,164 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec02d2a34ba48290fed943fe35718728 columnFamilyName A 2024-12-12T05:40:58,164 DEBUG [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:58,165 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.HStore(327): Store=ec02d2a34ba48290fed943fe35718728/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:58,165 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,167 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:58,167 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec02d2a34ba48290fed943fe35718728 columnFamilyName B 2024-12-12T05:40:58,167 DEBUG [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:58,168 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.HStore(327): Store=ec02d2a34ba48290fed943fe35718728/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:58,168 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,170 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:40:58,170 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec02d2a34ba48290fed943fe35718728 columnFamilyName C 2024-12-12T05:40:58,171 DEBUG [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:40:58,171 INFO [StoreOpener-ec02d2a34ba48290fed943fe35718728-1 {}] regionserver.HStore(327): Store=ec02d2a34ba48290fed943fe35718728/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:40:58,172 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:58,173 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,174 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,176 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:40:58,177 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1085): writing seq id for ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:58,179 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:40:58,180 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1102): Opened ec02d2a34ba48290fed943fe35718728; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62254581, jitterRate=-0.07233445346355438}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:40:58,180 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1001): Region open journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:40:58,181 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., pid=123, masterSystemTime=1733982058153 2024-12-12T05:40:58,183 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:58,183 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:40:58,183 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=ec02d2a34ba48290fed943fe35718728, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:58,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-12T05:40:58,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; OpenRegionProcedure ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 in 185 msec 2024-12-12T05:40:58,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-12-12T05:40:58,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, ASSIGN in 343 msec 2024-12-12T05:40:58,187 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:40:58,188 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982058188"}]},"ts":"1733982058188"} 2024-12-12T05:40:58,188 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T05:40:58,228 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:40:58,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2830 sec 2024-12-12T05:40:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-12T05:40:59,058 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 121 completed 2024-12-12T05:40:59,060 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61d38088 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0ab200 2024-12-12T05:40:59,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bb71c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,107 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,110 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45392, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,112 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:40:59,114 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33378, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:40:59,116 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7043f683 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5871c039 2024-12-12T05:40:59,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc0f7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b0c2472 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7daa5922 2024-12-12T05:40:59,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8b6e04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,136 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34b30c39 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b7f20c4 2024-12-12T05:40:59,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc486e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,145 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-12-12T05:40:59,152 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,154 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-12-12T05:40:59,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,163 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-12-12T05:40:59,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,169 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x184771cf to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51196534 2024-12-12T05:40:59,176 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,177 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x076f0408 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc5e114 2024-12-12T05:40:59,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d49886, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,186 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c692575 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3e96b8ad 2024-12-12T05:40:59,193 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@635b1751, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,194 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1cbd2497 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17e5a47d 2024-12-12T05:40:59,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cbfd84f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:40:59,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:40:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-12T05:40:59,205 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:40:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:40:59,205 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:40:59,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:40:59,211 DEBUG [hconnection-0xef4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,212 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45402, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,214 DEBUG [hconnection-0x1a324fcb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,215 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,219 DEBUG [hconnection-0x6d91d148-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,219 DEBUG [hconnection-0x4a278c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,220 DEBUG [hconnection-0x4782cfb8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,220 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,220 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,221 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:40:59,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:40:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:40:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:40:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:40:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:40:59,229 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982119229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982119229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982119230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982119230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,231 DEBUG [hconnection-0x68cd0e8b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,231 DEBUG [hconnection-0x7ef1d7ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,232 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45442, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,232 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982119233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,237 DEBUG [hconnection-0x3cd82011-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,237 DEBUG [hconnection-0x684065f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,238 DEBUG [hconnection-0x5ec93005-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:40:59,238 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,238 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,238 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:40:59,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/4f213a1f835e4c9ca734fb750c000d6e is 50, key is test_row_0/A:col10/1733982059219/Put/seqid=0 2024-12-12T05:40:59,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742247_1423 (size=12001) 2024-12-12T05:40:59,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:40:59,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982119331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982119331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982119331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982119331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982119334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:40:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:40:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:40:59,508 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:40:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:40:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982119535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982119535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982119535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982119535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982119538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:40:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:40:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/4f213a1f835e4c9ca734fb750c000d6e 2024-12-12T05:40:59,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/75617546cd6442838b579b290e9af670 is 50, key is test_row_0/B:col10/1733982059219/Put/seqid=0 2024-12-12T05:40:59,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742248_1424 (size=12001) 2024-12-12T05:40:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:40:59,813 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:40:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:40:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982119841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982119841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982119841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982119842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982119843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,964 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:40:59,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:40:59,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:40:59,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:40:59,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:40:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/75617546cd6442838b579b290e9af670 2024-12-12T05:41:00,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/acaf7a38405743c9b6b634b62e658555 is 50, key is test_row_0/C:col10/1733982059219/Put/seqid=0 2024-12-12T05:41:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742249_1425 (size=12001) 2024-12-12T05:41:00,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:41:00,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:41:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:00,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:41:00,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:00,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:00,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:00,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:41:00,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982120344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:00,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982120345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982120346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982120348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:00,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982120348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,421 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:41:00,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:00,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:00,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:00,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:00,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/acaf7a38405743c9b6b634b62e658555 2024-12-12T05:41:00,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/4f213a1f835e4c9ca734fb750c000d6e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/4f213a1f835e4c9ca734fb750c000d6e 2024-12-12T05:41:00,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/4f213a1f835e4c9ca734fb750c000d6e, entries=150, sequenceid=13, filesize=11.7 K 2024-12-12T05:41:00,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/75617546cd6442838b579b290e9af670 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/75617546cd6442838b579b290e9af670 2024-12-12T05:41:00,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/75617546cd6442838b579b290e9af670, entries=150, sequenceid=13, 
filesize=11.7 K 2024-12-12T05:41:00,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/acaf7a38405743c9b6b634b62e658555 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555 2024-12-12T05:41:00,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555, entries=150, sequenceid=13, filesize=11.7 K 2024-12-12T05:41:00,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ec02d2a34ba48290fed943fe35718728 in 1298ms, sequenceid=13, compaction requested=false 2024-12-12T05:41:00,520 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T05:41:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:00,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:00,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:00,574 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:00,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:00,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/d3ffab59d01c4f6f9da8fe8d4c24c05a is 50, key is test_row_0/A:col10/1733982059228/Put/seqid=0 2024-12-12T05:41:00,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742250_1426 (size=12001) 2024-12-12T05:41:00,982 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/d3ffab59d01c4f6f9da8fe8d4c24c05a 2024-12-12T05:41:00,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/756eb8a0ece64eba8803b3bfb36fe4ba is 50, key is test_row_0/B:col10/1733982059228/Put/seqid=0 2024-12-12T05:41:00,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742251_1427 (size=12001) 2024-12-12T05:41:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:41:01,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:41:01,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:01,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982121358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982121380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982121380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982121380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982121381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,389 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/756eb8a0ece64eba8803b3bfb36fe4ba 2024-12-12T05:41:01,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/38f88709476446da803163a441274b72 is 50, key is test_row_0/C:col10/1733982059228/Put/seqid=0 2024-12-12T05:41:01,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742252_1428 (size=12001) 2024-12-12T05:41:01,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982121482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982121485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982121485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982121486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982121486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982121685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982121688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982121689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982121690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982121691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:01,798 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/38f88709476446da803163a441274b72 2024-12-12T05:41:01,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/d3ffab59d01c4f6f9da8fe8d4c24c05a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/d3ffab59d01c4f6f9da8fe8d4c24c05a 2024-12-12T05:41:01,804 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/d3ffab59d01c4f6f9da8fe8d4c24c05a, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T05:41:01,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/756eb8a0ece64eba8803b3bfb36fe4ba as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/756eb8a0ece64eba8803b3bfb36fe4ba 2024-12-12T05:41:01,808 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/756eb8a0ece64eba8803b3bfb36fe4ba, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T05:41:01,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/38f88709476446da803163a441274b72 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/38f88709476446da803163a441274b72 2024-12-12T05:41:01,811 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/38f88709476446da803163a441274b72, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T05:41:01,812 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ec02d2a34ba48290fed943fe35718728 in 1238ms, sequenceid=38, compaction requested=false 2024-12-12T05:41:01,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:01,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
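
[Editor's note, not part of the captured log] The flush above completes while the handler threads keep rejecting Mutate RPCs with RegionTooBusyException ("Over memstore limit=512.0 K"). For orientation, below is a minimal client-side sketch of what such a rejected write and a backoff retry could look like, assuming the standard HBase 2.x client API. The table name, row key, column family "A" and qualifier "col10" are taken from the log; the explicit retry loop and the 5-attempt/backoff values are illustrative assumptions only, since the HBase client normally retries RegionTooBusyException internally and may surface it wrapped in a retries-exhausted exception rather than directly.

    // Illustrative sketch only; not taken from the TestAcidGuarantees source.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // One Put corresponds to one of the Mutate RPCs seen in the log.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100; // assumed starting backoff, doubled per attempt
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put); // write accepted once the memstore drops below its blocking limit
                        break;
                    } catch (RegionTooBusyException busy) {
                        // Region memstore is above its blocking limit; wait for the flush
                        // (see the DefaultStoreFlusher entries in the log) and retry.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }

The 512 K blocking limit reported in these entries is unusually small for production and is consistent with the test deliberately shrinking the region memstore flush size so that writers hit the blocking path and flushes are forced frequently.
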
2024-12-12T05:41:01,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-12T05:41:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-12T05:41:01,814 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-12T05:41:01,814 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6080 sec 2024-12-12T05:41:01,814 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.6100 sec 2024-12-12T05:41:01,890 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T05:41:01,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:01,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:41:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:01,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/5c32f838800645e08963df498ed20f4b is 50, key is test_row_0/A:col10/1733982061990/Put/seqid=0 2024-12-12T05:41:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742253_1429 (size=9657) 2024-12-12T05:41:02,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982122011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982122012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982122012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982122014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982122015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982122116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982122116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982122116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982122119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982122119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982122321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982122321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982122322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982122326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982122327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/5c32f838800645e08963df498ed20f4b 2024-12-12T05:41:02,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/192fb585f3d24cf3a17ddd2e555ffff4 is 50, key is test_row_0/B:col10/1733982061990/Put/seqid=0 2024-12-12T05:41:02,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742254_1430 (size=9657) 2024-12-12T05:41:02,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982122627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982122627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982122628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982122629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:02,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982122630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:02,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/192fb585f3d24cf3a17ddd2e555ffff4 2024-12-12T05:41:02,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/d86e13ac010b4e37896812465711ae61 is 50, key is test_row_0/C:col10/1733982061990/Put/seqid=0 2024-12-12T05:41:02,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742255_1431 (size=9657) 2024-12-12T05:41:03,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982123130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:03,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982123132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:03,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982123132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:03,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982123133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:03,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:03,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982123135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:03,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/d86e13ac010b4e37896812465711ae61 2024-12-12T05:41:03,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/5c32f838800645e08963df498ed20f4b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5c32f838800645e08963df498ed20f4b 2024-12-12T05:41:03,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5c32f838800645e08963df498ed20f4b, entries=100, sequenceid=50, filesize=9.4 K 2024-12-12T05:41:03,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/192fb585f3d24cf3a17ddd2e555ffff4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/192fb585f3d24cf3a17ddd2e555ffff4 2024-12-12T05:41:03,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/192fb585f3d24cf3a17ddd2e555ffff4, entries=100, sequenceid=50, filesize=9.4 K 2024-12-12T05:41:03,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/d86e13ac010b4e37896812465711ae61 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d86e13ac010b4e37896812465711ae61 2024-12-12T05:41:03,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d86e13ac010b4e37896812465711ae61, entries=100, sequenceid=50, filesize=9.4 K 2024-12-12T05:41:03,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ec02d2a34ba48290fed943fe35718728 in 1272ms, sequenceid=50, compaction requested=true 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:03,263 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:03,263 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:03,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:03,264 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:03,264 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:03,264 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/75617546cd6442838b579b290e9af670, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/756eb8a0ece64eba8803b3bfb36fe4ba, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/192fb585f3d24cf3a17ddd2e555ffff4] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=32.9 K 2024-12-12T05:41:03,264 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/4f213a1f835e4c9ca734fb750c000d6e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/d3ffab59d01c4f6f9da8fe8d4c24c05a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5c32f838800645e08963df498ed20f4b] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=32.9 K 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 75617546cd6442838b579b290e9af670, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733982059219 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f213a1f835e4c9ca734fb750c000d6e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733982059219 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 756eb8a0ece64eba8803b3bfb36fe4ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733982059227 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting d3ffab59d01c4f6f9da8fe8d4c24c05a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733982059227 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 192fb585f3d24cf3a17ddd2e555ffff4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733982061380 2024-12-12T05:41:03,264 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c32f838800645e08963df498ed20f4b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733982061380 2024-12-12T05:41:03,271 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:03,271 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/0065d168bc1c4731bd01a4201da0e950 is 50, key is test_row_0/A:col10/1733982061990/Put/seqid=0 2024-12-12T05:41:03,276 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#358 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:03,277 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/88c67e94385a403585ea73648fa4f49e is 50, key is test_row_0/B:col10/1733982061990/Put/seqid=0 2024-12-12T05:41:03,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742256_1432 (size=12104) 2024-12-12T05:41:03,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742257_1433 (size=12104) 2024-12-12T05:41:03,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-12T05:41:03,309 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-12T05:41:03,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:03,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-12T05:41:03,311 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:03,311 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126
2024-12-12T05:41:03,312 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T05:41:03,312 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-12T05:41:03,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126
2024-12-12T05:41:03,463 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:41:03,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.
2024-12-12T05:41:03,464 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C
2024-12-12T05:41:03,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T05:41:03,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/02d594fac7e74b698269c3b9c95cc748 is 50, key is test_row_0/A:col10/1733982062011/Put/seqid=0
2024-12-12T05:41:03,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742258_1434
(size=12001) 2024-12-12T05:41:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T05:41:03,691 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/0065d168bc1c4731bd01a4201da0e950 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/0065d168bc1c4731bd01a4201da0e950 2024-12-12T05:41:03,695 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 0065d168bc1c4731bd01a4201da0e950(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:03,695 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:03,695 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982063263; duration=0sec 2024-12-12T05:41:03,695 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:03,695 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:03,695 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:03,695 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/88c67e94385a403585ea73648fa4f49e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/88c67e94385a403585ea73648fa4f49e 2024-12-12T05:41:03,702 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:03,702 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:03,702 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:03,702 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/38f88709476446da803163a441274b72, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d86e13ac010b4e37896812465711ae61] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=32.9 K 2024-12-12T05:41:03,702 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting acaf7a38405743c9b6b634b62e658555, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733982059219 2024-12-12T05:41:03,702 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38f88709476446da803163a441274b72, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733982059227 2024-12-12T05:41:03,703 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d86e13ac010b4e37896812465711ae61, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733982061380 2024-12-12T05:41:03,705 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into 88c67e94385a403585ea73648fa4f49e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:03,705 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:03,705 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982063263; duration=0sec 2024-12-12T05:41:03,705 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:03,705 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:03,713 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:03,713 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/5e0f7b8e4ce14bb9851dffc230da5781 is 50, key is test_row_0/C:col10/1733982061990/Put/seqid=0 2024-12-12T05:41:03,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742259_1435 (size=12104) 2024-12-12T05:41:03,871 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/02d594fac7e74b698269c3b9c95cc748 2024-12-12T05:41:03,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/07f461a4ddcd49799f12185b085846aa is 50, key is test_row_0/B:col10/1733982062011/Put/seqid=0 2024-12-12T05:41:03,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742260_1436 (size=12001) 2024-12-12T05:41:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T05:41:04,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/5e0f7b8e4ce14bb9851dffc230da5781 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5e0f7b8e4ce14bb9851dffc230da5781 2024-12-12T05:41:04,123 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into 5e0f7b8e4ce14bb9851dffc230da5781(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:04,123 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728:
2024-12-12T05:41:04,123 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982063263; duration=0sec
2024-12-12T05:41:04,123 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:41:04,123 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C
2024-12-12T05:41:04,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing
2024-12-12T05:41:04,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728
2024-12-12T05:41:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:41:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982124145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982124145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982124149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982124150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982124150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982124251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982124251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982124254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982124254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982124255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,279 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/07f461a4ddcd49799f12185b085846aa 2024-12-12T05:41:04,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3038721b9d4f47ffa9422eeb13c25fb0 is 50, key is test_row_0/C:col10/1733982062011/Put/seqid=0 2024-12-12T05:41:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742261_1437 (size=12001) 2024-12-12T05:41:04,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T05:41:04,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982124453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982124454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982124457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982124457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:41:04,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982124458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:41:04,688 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3038721b9d4f47ffa9422eeb13c25fb0
2024-12-12T05:41:04,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/02d594fac7e74b698269c3b9c95cc748 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/02d594fac7e74b698269c3b9c95cc748
2024-12-12T05:41:04,693 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/02d594fac7e74b698269c3b9c95cc748, entries=150, sequenceid=74, filesize=11.7 K
2024-12-12T05:41:04,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/07f461a4ddcd49799f12185b085846aa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07f461a4ddcd49799f12185b085846aa
2024-12-12T05:41:04,696 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07f461a4ddcd49799f12185b085846aa, entries=150, sequenceid=74, filesize=11.7 K
2024-12-12T05:41:04,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3038721b9d4f47ffa9422eeb13c25fb0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3038721b9d4f47ffa9422eeb13c25fb0
2024-12-12T05:41:04,699 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3038721b9d4f47ffa9422eeb13c25fb0, entries=150, sequenceid=74, filesize=11.7 K
2024-12-12T05:41:04,700 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for ec02d2a34ba48290fed943fe35718728 in 1236ms, sequenceid=74, compaction requested=false
2024-12-12T05:41:04,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728:
2024-12-12T05:41:04,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.
2024-12-12T05:41:04,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-12T05:41:04,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-12T05:41:04,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-12T05:41:04,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3890 sec 2024-12-12T05:41:04,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.3920 sec 2024-12-12T05:41:04,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:04,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T05:41:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:04,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:04,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/eff4b95551b3490dad790ea80f68486a is 50, key is test_row_0/A:col10/1733982064149/Put/seqid=0 2024-12-12T05:41:04,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742262_1438 (size=14341) 2024-12-12T05:41:04,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982124775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982124776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982124777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982124777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982124778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982124880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982124881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982124882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982124882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:04,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982124882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982125084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982125084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982125086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982125087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982125087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/eff4b95551b3490dad790ea80f68486a 2024-12-12T05:41:05,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/95eb5135d1cc4100b54c44901f419019 is 50, key is test_row_0/B:col10/1733982064149/Put/seqid=0 2024-12-12T05:41:05,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742263_1439 (size=12001) 2024-12-12T05:41:05,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/95eb5135d1cc4100b54c44901f419019 2024-12-12T05:41:05,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/eb6af930fed4468a8540bee85a41a511 is 50, key is test_row_0/C:col10/1733982064149/Put/seqid=0 2024-12-12T05:41:05,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742264_1440 (size=12001) 2024-12-12T05:41:05,390 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982125387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982125388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982125391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982125391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T05:41:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982125391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566
2024-12-12T05:41:05,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126
2024-12-12T05:41:05,415 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed
2024-12-12T05:41:05,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T05:41:05,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees
2024-12-12T05:41:05,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-12-12T05:41:05,417 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T05:41:05,417 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T05:41:05,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-12T05:41:05,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-12-12T05:41:05,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:41:05,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129
2024-12-12T05:41:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.
2024-12-12T05:41:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing
2024-12-12T05:41:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.
2024-12-12T05:41:05,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129
java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:41:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129
java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:41:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=129
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:41:05,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/eb6af930fed4468a8540bee85a41a511
2024-12-12T05:41:05,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/eff4b95551b3490dad790ea80f68486a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/eff4b95551b3490dad790ea80f68486a
2024-12-12T05:41:05,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/eff4b95551b3490dad790ea80f68486a, entries=200, sequenceid=91, filesize=14.0 K
2024-12-12T05:41:05,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/95eb5135d1cc4100b54c44901f419019 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/95eb5135d1cc4100b54c44901f419019
2024-12-12T05:41:05,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/95eb5135d1cc4100b54c44901f419019, entries=150, sequenceid=91, filesize=11.7 K
2024-12-12T05:41:05,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/eb6af930fed4468a8540bee85a41a511 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/eb6af930fed4468a8540bee85a41a511
2024-12-12T05:41:05,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/eb6af930fed4468a8540bee85a41a511, entries=150, sequenceid=91, filesize=11.7 K
2024-12-12T05:41:05,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ec02d2a34ba48290fed943fe35718728 in 837ms, sequenceid=91, compaction requested=true
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728:
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:41:05,596 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T05:41:05,596 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3
2024-12-12T05:41:05,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files)
2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files)
2024-12-12T05:41:05,597 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.
2024-12-12T05:41:05,597 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.
2024-12-12T05:41:05,597 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/88c67e94385a403585ea73648fa4f49e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07f461a4ddcd49799f12185b085846aa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/95eb5135d1cc4100b54c44901f419019] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=35.3 K 2024-12-12T05:41:05,597 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/0065d168bc1c4731bd01a4201da0e950, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/02d594fac7e74b698269c3b9c95cc748, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/eff4b95551b3490dad790ea80f68486a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=37.5 K 2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0065d168bc1c4731bd01a4201da0e950, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733982059227 2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 88c67e94385a403585ea73648fa4f49e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733982059227 2024-12-12T05:41:05,597 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 07f461a4ddcd49799f12185b085846aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733982062011 2024-12-12T05:41:05,598 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02d594fac7e74b698269c3b9c95cc748, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733982062011 2024-12-12T05:41:05,598 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 95eb5135d1cc4100b54c44901f419019, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982064149 2024-12-12T05:41:05,598 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting eff4b95551b3490dad790ea80f68486a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982064145 2024-12-12T05:41:05,603 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:05,604 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/beaea3d0350b49a093ada34ce94ad7a4 is 50, key is test_row_0/B:col10/1733982064149/Put/seqid=0 2024-12-12T05:41:05,604 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#367 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:05,604 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/3bf15963ed9d44f7a57e253e8d295381 is 50, key is test_row_0/A:col10/1733982064149/Put/seqid=0 2024-12-12T05:41:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742265_1441 (size=12207) 2024-12-12T05:41:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742266_1442 (size=12207) 2024-12-12T05:41:05,611 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/beaea3d0350b49a093ada34ce94ad7a4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/beaea3d0350b49a093ada34ce94ad7a4 2024-12-12T05:41:05,614 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into beaea3d0350b49a093ada34ce94ad7a4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
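The "Committing ...tmp/B/beaea3d0350b49a093ada34ce94ad7a4 as .../B/beaea3d0350b49a093ada34ce94ad7a4" entry above is the point where the compacted output, written under the region's .tmp directory, is moved into the B column-family directory and becomes visible to readers. Below is a minimal sketch of that commit-by-rename step, assuming a plain Hadoop FileSystem.rename against the hdfs://localhost:45813 namenode from the log is enough to convey the idea; the class name is hypothetical and this is not the actual HRegionFileSystem code.

// Sketch of committing a temporary store file by renaming it into the
// column-family directory (assumption: FileSystem.rename stands in for the commit step).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitByRenameSketch {
    public static void main(String[] args) throws Exception {
        // Paths copied from the log entry above: compaction output lands under .tmp first.
        Path tmpFile = new Path("hdfs://localhost:45813/user/jenkins/test-data/"
            + "0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/"
            + "ec02d2a34ba48290fed943fe35718728/.tmp/B/beaea3d0350b49a093ada34ce94ad7a4");
        Path storeFile = new Path("hdfs://localhost:45813/user/jenkins/test-data/"
            + "0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/"
            + "ec02d2a34ba48290fed943fe35718728/B/beaea3d0350b49a093ada34ce94ad7a4");

        FileSystem fs = FileSystem.get(tmpFile.toUri(), new Configuration());

        // The file only becomes visible to readers once it sits in the store directory.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}

Writing to .tmp first and publishing with a single rename keeps scanners from ever observing a half-written HFile, which is why the "Completed compaction" entry above can immediately report the new total size for the store.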
2024-12-12T05:41:05,614 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:05,614 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982065596; duration=0sec 2024-12-12T05:41:05,614 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:05,614 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:05,614 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:05,615 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:05,615 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:05,615 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:05,615 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5e0f7b8e4ce14bb9851dffc230da5781, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3038721b9d4f47ffa9422eeb13c25fb0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/eb6af930fed4468a8540bee85a41a511] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=35.3 K 2024-12-12T05:41:05,615 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e0f7b8e4ce14bb9851dffc230da5781, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733982059227 2024-12-12T05:41:05,616 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 3038721b9d4f47ffa9422eeb13c25fb0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733982062011 2024-12-12T05:41:05,616 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting eb6af930fed4468a8540bee85a41a511, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982064149 2024-12-12T05:41:05,620 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ec02d2a34ba48290fed943fe35718728#C#compaction#368 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:05,620 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/af33e9ff810f484fbfe3672d3fe5cae7 is 50, key is test_row_0/C:col10/1733982064149/Put/seqid=0 2024-12-12T05:41:05,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742267_1443 (size=12207) 2024-12-12T05:41:05,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T05:41:05,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-12T05:41:05,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:05,721 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T05:41:05,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:05,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:05,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:05,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:05,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:05,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:05,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/213a3ab9a9d040e5ac993bcac23fbc64 is 50, key is test_row_0/A:col10/1733982064777/Put/seqid=0 2024-12-12T05:41:05,728 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742268_1444 (size=12001) 2024-12-12T05:41:05,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:05,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:05,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982125902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982125903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982125906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982125907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:05,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:05,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982125907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,011 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/3bf15963ed9d44f7a57e253e8d295381 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/3bf15963ed9d44f7a57e253e8d295381 2024-12-12T05:41:06,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982126009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982126009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982126012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982126012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982126012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,015 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 3bf15963ed9d44f7a57e253e8d295381(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:06,015 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:06,015 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982065596; duration=0sec 2024-12-12T05:41:06,015 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:06,015 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:06,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T05:41:06,027 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/af33e9ff810f484fbfe3672d3fe5cae7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/af33e9ff810f484fbfe3672d3fe5cae7 2024-12-12T05:41:06,030 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into 
af33e9ff810f484fbfe3672d3fe5cae7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:06,030 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:06,030 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982065596; duration=0sec 2024-12-12T05:41:06,030 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:06,030 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:06,129 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/213a3ab9a9d040e5ac993bcac23fbc64 2024-12-12T05:41:06,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/f9755624a5dd40fc9538cf76d34ece74 is 50, key is test_row_0/B:col10/1733982064777/Put/seqid=0 2024-12-12T05:41:06,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742269_1445 (size=12001) 2024-12-12T05:41:06,138 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/f9755624a5dd40fc9538cf76d34ece74 2024-12-12T05:41:06,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/10415e1317fc420eb54f6bbbd223c5ab is 50, key is test_row_0/C:col10/1733982064777/Put/seqid=0 2024-12-12T05:41:06,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742270_1446 (size=12001) 2024-12-12T05:41:06,149 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/10415e1317fc420eb54f6bbbd223c5ab 2024-12-12T05:41:06,152 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/213a3ab9a9d040e5ac993bcac23fbc64 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/213a3ab9a9d040e5ac993bcac23fbc64 2024-12-12T05:41:06,154 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/213a3ab9a9d040e5ac993bcac23fbc64, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T05:41:06,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/f9755624a5dd40fc9538cf76d34ece74 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f9755624a5dd40fc9538cf76d34ece74 2024-12-12T05:41:06,158 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f9755624a5dd40fc9538cf76d34ece74, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T05:41:06,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/10415e1317fc420eb54f6bbbd223c5ab as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/10415e1317fc420eb54f6bbbd223c5ab 2024-12-12T05:41:06,162 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/10415e1317fc420eb54f6bbbd223c5ab, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T05:41:06,162 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for ec02d2a34ba48290fed943fe35718728 in 441ms, sequenceid=114, compaction requested=false 2024-12-12T05:41:06,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:06,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:06,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-12T05:41:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-12T05:41:06,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-12T05:41:06,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 746 msec 2024-12-12T05:41:06,165 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 748 msec 2024-12-12T05:41:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:06,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T05:41:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:06,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/607cba0a106d4081958d05592486dcbd is 50, key is test_row_0/A:col10/1733982065907/Put/seqid=0 2024-12-12T05:41:06,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742271_1447 (size=14491) 2024-12-12T05:41:06,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982126257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982126257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982126262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982126262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982126263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982126364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982126364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982126369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982126369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982126369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-12T05:41:06,519 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-12T05:41:06,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:06,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-12T05:41:06,521 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:06,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:06,521 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:06,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:06,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982126568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982126568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982126574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982126574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982126575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:06,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/607cba0a106d4081958d05592486dcbd 2024-12-12T05:41:06,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/5e89dc537461469b9790dfc154f7414c is 50, key is test_row_0/B:col10/1733982065907/Put/seqid=0 2024-12-12T05:41:06,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742272_1448 (size=12101) 2024-12-12T05:41:06,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:06,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:06,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:06,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982126871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982126872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982126878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982126879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:06,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982126879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:06,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:06,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:06,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:06,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:06,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:06,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/5e89dc537461469b9790dfc154f7414c 2024-12-12T05:41:07,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/d7b58396fd324020907c1572a0749cfd is 50, key is test_row_0/C:col10/1733982065907/Put/seqid=0 2024-12-12T05:41:07,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742273_1449 (size=12101) 2024-12-12T05:41:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:07,128 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:07,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:41:07,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,280 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:07,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982127375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:07,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982127376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:07,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982127380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:07,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982127385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:07,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982127386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:07,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:07,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:07,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/d7b58396fd324020907c1572a0749cfd 2024-12-12T05:41:07,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/607cba0a106d4081958d05592486dcbd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/607cba0a106d4081958d05592486dcbd 2024-12-12T05:41:07,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/607cba0a106d4081958d05592486dcbd, entries=200, sequenceid=133, filesize=14.2 K 2024-12-12T05:41:07,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/5e89dc537461469b9790dfc154f7414c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5e89dc537461469b9790dfc154f7414c 2024-12-12T05:41:07,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5e89dc537461469b9790dfc154f7414c, entries=150, 
sequenceid=133, filesize=11.8 K 2024-12-12T05:41:07,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/d7b58396fd324020907c1572a0749cfd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d7b58396fd324020907c1572a0749cfd 2024-12-12T05:41:07,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d7b58396fd324020907c1572a0749cfd, entries=150, sequenceid=133, filesize=11.8 K 2024-12-12T05:41:07,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for ec02d2a34ba48290fed943fe35718728 in 1237ms, sequenceid=133, compaction requested=true 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:07,455 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:07,455 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38699 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:07,456 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in 
TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:07,456 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/3bf15963ed9d44f7a57e253e8d295381, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/213a3ab9a9d040e5ac993bcac23fbc64, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/607cba0a106d4081958d05592486dcbd] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=37.8 K 2024-12-12T05:41:07,456 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:07,456 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/beaea3d0350b49a093ada34ce94ad7a4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f9755624a5dd40fc9538cf76d34ece74, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5e89dc537461469b9790dfc154f7414c] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=35.5 K 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bf15963ed9d44f7a57e253e8d295381, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982064149 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting beaea3d0350b49a093ada34ce94ad7a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982064149 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 213a3ab9a9d040e5ac993bcac23fbc64, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733982064763 2024-12-12T05:41:07,456 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f9755624a5dd40fc9538cf76d34ece74, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733982064763 2024-12-12T05:41:07,457 
DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 607cba0a106d4081958d05592486dcbd, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733982065907 2024-12-12T05:41:07,457 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e89dc537461469b9790dfc154f7414c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733982065907 2024-12-12T05:41:07,466 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:07,467 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#376 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:07,467 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/f66da54f255d472ab00e38e9731c6ab7 is 50, key is test_row_0/A:col10/1733982065907/Put/seqid=0 2024-12-12T05:41:07,467 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/7eed6bf1467d425ca6fcf60e48554fe1 is 50, key is test_row_0/B:col10/1733982065907/Put/seqid=0 2024-12-12T05:41:07,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742274_1450 (size=12409) 2024-12-12T05:41:07,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742275_1451 (size=12409) 2024-12-12T05:41:07,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:07,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:07,590 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:07,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/80e40da0edc241a4923ee516bc4cca7f is 50, key is test_row_0/A:col10/1733982066261/Put/seqid=0 2024-12-12T05:41:07,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742276_1452 (size=12151) 2024-12-12T05:41:07,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:07,873 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/7eed6bf1467d425ca6fcf60e48554fe1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/7eed6bf1467d425ca6fcf60e48554fe1 2024-12-12T05:41:07,873 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/f66da54f255d472ab00e38e9731c6ab7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f66da54f255d472ab00e38e9731c6ab7 2024-12-12T05:41:07,876 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 
f66da54f255d472ab00e38e9731c6ab7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:07,876 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into 7eed6bf1467d425ca6fcf60e48554fe1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:07,876 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982067455; duration=0sec 2024-12-12T05:41:07,876 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982067455; duration=0sec 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:07,876 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:07,877 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:07,877 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:07,877 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:07,877 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/af33e9ff810f484fbfe3672d3fe5cae7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/10415e1317fc420eb54f6bbbd223c5ab, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d7b58396fd324020907c1572a0749cfd] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=35.5 K 2024-12-12T05:41:07,877 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting af33e9ff810f484fbfe3672d3fe5cae7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733982064149 2024-12-12T05:41:07,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10415e1317fc420eb54f6bbbd223c5ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733982064763 2024-12-12T05:41:07,878 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7b58396fd324020907c1572a0749cfd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733982065907 2024-12-12T05:41:07,883 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:07,883 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/13980602b6ed42439667eb86b1e1c2eb is 50, key is test_row_0/C:col10/1733982065907/Put/seqid=0 2024-12-12T05:41:07,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742277_1453 (size=12409) 2024-12-12T05:41:07,997 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/80e40da0edc241a4923ee516bc4cca7f 2024-12-12T05:41:08,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/2f112711faf0474d89b89cd47ca26389 is 50, key is test_row_0/B:col10/1733982066261/Put/seqid=0 2024-12-12T05:41:08,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742278_1454 (size=12151) 2024-12-12T05:41:08,290 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/13980602b6ed42439667eb86b1e1c2eb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/13980602b6ed42439667eb86b1e1c2eb 2024-12-12T05:41:08,294 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into 13980602b6ed42439667eb86b1e1c2eb(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:08,294 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:08,294 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982067455; duration=0sec 2024-12-12T05:41:08,294 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:08,294 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:08,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:08,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982128397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982128397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982128398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982128399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982128399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,405 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/2f112711faf0474d89b89cd47ca26389 2024-12-12T05:41:08,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/00b9d7aba66049e7874dcdb402d4dcc9 is 50, key is test_row_0/C:col10/1733982066261/Put/seqid=0 2024-12-12T05:41:08,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742279_1455 (size=12151) 2024-12-12T05:41:08,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982128501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982128501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982128503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982128503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:08,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982128703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982128703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982128706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982128706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:08,813 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/00b9d7aba66049e7874dcdb402d4dcc9 2024-12-12T05:41:08,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/80e40da0edc241a4923ee516bc4cca7f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/80e40da0edc241a4923ee516bc4cca7f 2024-12-12T05:41:08,823 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/80e40da0edc241a4923ee516bc4cca7f, entries=150, sequenceid=152, filesize=11.9 K 2024-12-12T05:41:08,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/2f112711faf0474d89b89cd47ca26389 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/2f112711faf0474d89b89cd47ca26389 2024-12-12T05:41:08,826 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/2f112711faf0474d89b89cd47ca26389, entries=150, sequenceid=152, filesize=11.9 K 2024-12-12T05:41:08,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/00b9d7aba66049e7874dcdb402d4dcc9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/00b9d7aba66049e7874dcdb402d4dcc9 2024-12-12T05:41:08,830 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/00b9d7aba66049e7874dcdb402d4dcc9, entries=150, sequenceid=152, filesize=11.9 K 2024-12-12T05:41:08,830 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for ec02d2a34ba48290fed943fe35718728 in 1240ms, sequenceid=152, compaction requested=false 2024-12-12T05:41:08,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:08,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:08,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-12T05:41:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-12T05:41:08,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-12T05:41:08,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3100 sec 2024-12-12T05:41:08,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.3120 sec 2024-12-12T05:41:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:09,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T05:41:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:09,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/b549b62a90b24add90ae2e94f45d2a89 is 50, key is test_row_0/A:col10/1733982069008/Put/seqid=0 2024-12-12T05:41:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742280_1456 (size=14541) 2024-12-12T05:41:09,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982129023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982129024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982129025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982129025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982129126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982129129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982129129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982129130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982129331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982129332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982129332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982129335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/b549b62a90b24add90ae2e94f45d2a89 2024-12-12T05:41:09,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/0b0ad5b753714f31b1cd89dfa4b13c5a is 50, key is test_row_0/B:col10/1733982069008/Put/seqid=0 2024-12-12T05:41:09,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742281_1457 (size=12151) 2024-12-12T05:41:09,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982129635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982129636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982129637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982129641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:09,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/0b0ad5b753714f31b1cd89dfa4b13c5a 2024-12-12T05:41:09,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/78cb4438e6534888b31a712e31db8ff0 is 50, key is test_row_0/C:col10/1733982069008/Put/seqid=0 2024-12-12T05:41:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742282_1458 (size=12151) 2024-12-12T05:41:10,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982130141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982130143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982130143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982130147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/78cb4438e6534888b31a712e31db8ff0 2024-12-12T05:41:10,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/b549b62a90b24add90ae2e94f45d2a89 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/b549b62a90b24add90ae2e94f45d2a89 2024-12-12T05:41:10,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/b549b62a90b24add90ae2e94f45d2a89, entries=200, sequenceid=174, filesize=14.2 K 2024-12-12T05:41:10,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/0b0ad5b753714f31b1cd89dfa4b13c5a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/0b0ad5b753714f31b1cd89dfa4b13c5a 2024-12-12T05:41:10,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/0b0ad5b753714f31b1cd89dfa4b13c5a, entries=150, sequenceid=174, filesize=11.9 K 2024-12-12T05:41:10,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/78cb4438e6534888b31a712e31db8ff0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/78cb4438e6534888b31a712e31db8ff0 2024-12-12T05:41:10,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/78cb4438e6534888b31a712e31db8ff0, entries=150, sequenceid=174, filesize=11.9 K 2024-12-12T05:41:10,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for ec02d2a34ba48290fed943fe35718728 in 1238ms, sequenceid=174, compaction requested=true 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:10,246 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:10,246 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:10,247 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39101 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:10,247 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:10,247 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:10,247 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:10,247 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:10,247 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:10,247 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f66da54f255d472ab00e38e9731c6ab7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/80e40da0edc241a4923ee516bc4cca7f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/b549b62a90b24add90ae2e94f45d2a89] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=38.2 K 2024-12-12T05:41:10,247 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/7eed6bf1467d425ca6fcf60e48554fe1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/2f112711faf0474d89b89cd47ca26389, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/0b0ad5b753714f31b1cd89dfa4b13c5a] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=35.9 K 2024-12-12T05:41:10,247 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f66da54f255d472ab00e38e9731c6ab7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733982065907 2024-12-12T05:41:10,247 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7eed6bf1467d425ca6fcf60e48554fe1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733982065907 2024-12-12T05:41:10,248 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f112711faf0474d89b89cd47ca26389, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733982066261 2024-12-12T05:41:10,248 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80e40da0edc241a4923ee516bc4cca7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733982066261 2024-12-12T05:41:10,248 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b0ad5b753714f31b1cd89dfa4b13c5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733982068397 2024-12-12T05:41:10,248 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting b549b62a90b24add90ae2e94f45d2a89, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733982068397 
2024-12-12T05:41:10,254 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#384 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:10,254 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/5cc4f89fa1b143a28c364cb146ad1caf is 50, key is test_row_0/A:col10/1733982069008/Put/seqid=0 2024-12-12T05:41:10,256 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:10,256 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/5b301a990b3e4a22a1817cff6150abfa is 50, key is test_row_0/B:col10/1733982069008/Put/seqid=0 2024-12-12T05:41:10,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742284_1460 (size=12561) 2024-12-12T05:41:10,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742283_1459 (size=12561) 2024-12-12T05:41:10,279 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/5cc4f89fa1b143a28c364cb146ad1caf as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5cc4f89fa1b143a28c364cb146ad1caf 2024-12-12T05:41:10,283 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 5cc4f89fa1b143a28c364cb146ad1caf(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
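[Editor's note] The compactions above are system-requested by MemStoreFlusher.0; nothing in this test triggers them by hand. Purely as an illustrative sketch, assuming the standard HBase 2.x Admin API and default connection settings, an operator could queue the same kind of work manually like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction for one column family (store A, as in the log);
      // the region server still picks files with the same policy seen above.
      admin.compact(table, Bytes.toBytes("A"));
      // Or ask for all files in every store to be rewritten.
      admin.majorCompact(table);
    }
  }
}
```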
2024-12-12T05:41:10,283 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:10,283 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982070246; duration=0sec 2024-12-12T05:41:10,283 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:10,283 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:10,283 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:10,284 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:10,284 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:10,284 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:10,284 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/13980602b6ed42439667eb86b1e1c2eb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/00b9d7aba66049e7874dcdb402d4dcc9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/78cb4438e6534888b31a712e31db8ff0] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=35.9 K 2024-12-12T05:41:10,285 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13980602b6ed42439667eb86b1e1c2eb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733982065907 2024-12-12T05:41:10,285 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00b9d7aba66049e7874dcdb402d4dcc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733982066261 2024-12-12T05:41:10,285 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78cb4438e6534888b31a712e31db8ff0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733982068397 2024-12-12T05:41:10,294 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:10,294 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/5aff2499b3ea472cb0fb8e9ea19e29d8 is 50, key is test_row_0/C:col10/1733982069008/Put/seqid=0 2024-12-12T05:41:10,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742285_1461 (size=12561) 2024-12-12T05:41:10,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:10,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T05:41:10,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:10,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:10,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:10,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:10,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:10,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:10,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/85905f825d8c488183edd74514392658 is 50, key is test_row_0/A:col10/1733982069023/Put/seqid=0 2024-12-12T05:41:10,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742286_1462 (size=14541) 2024-12-12T05:41:10,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982130465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982130567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T05:41:10,625 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-12T05:41:10,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:10,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-12T05:41:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T05:41:10,627 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:10,627 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:10,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:10,669 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/5b301a990b3e4a22a1817cff6150abfa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5b301a990b3e4a22a1817cff6150abfa 2024-12-12T05:41:10,673 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into 5b301a990b3e4a22a1817cff6150abfa(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
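[Editor's note] Earlier in this line the master records a client flush of TestAcidGuarantees and stores a FlushTableProcedure (pid=132). As a hedged illustration of how such a flush is issued through the standard Admin API, assuming default connection settings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master runs a
      // FlushTableProcedure (like pid=132 above) and the call returns once the
      // procedure finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```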
2024-12-12T05:41:10,673 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:10,673 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982070246; duration=0sec 2024-12-12T05:41:10,673 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:10,673 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:10,707 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/5aff2499b3ea472cb0fb8e9ea19e29d8 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5aff2499b3ea472cb0fb8e9ea19e29d8 2024-12-12T05:41:10,710 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into 5aff2499b3ea472cb0fb8e9ea19e29d8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:10,710 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:10,710 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982070246; duration=0sec 2024-12-12T05:41:10,710 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:10,710 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T05:41:10,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:10,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982130771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,779 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T05:41:10,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:10,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:10,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:10,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
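[Editor's note] The repeated RegionTooBusyException warnings above show puts being rejected while the region's memstore sits over its blocking limit. The sketch below is illustrative only, not part of this test: it shows how a caller using the standard HBase client API might back off and retry such a put. The table, row, family and qualifier match what the log shows; the cell value, retry budget and back-off interval are arbitrary assumptions, and depending on client retry settings the exception may arrive wrapped in a retries-exhausted exception rather than directly.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // value is assumed
      int attempts = 5;      // assumed retry budget, not taken from the log
      long backoffMs = 200;  // assumed base back-off, not taken from the log
      for (int i = 0; i < attempts; i++) {
        try {
          table.put(put);    // the server may reject this with RegionTooBusyException
          break;
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for flushes to catch up.
          Thread.sleep(backoffMs * (i + 1));
        }
      }
    }
  }
}
```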
2024-12-12T05:41:10,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:10,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:10,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/85905f825d8c488183edd74514392658 2024-12-12T05:41:10,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/23fffdb49373453fa0f944a1f5b2a78f is 50, key is test_row_0/B:col10/1733982069023/Put/seqid=0 2024-12-12T05:41:10,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742287_1463 (size=12151) 2024-12-12T05:41:10,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T05:41:10,931 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:10,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T05:41:10,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:10,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:10,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:10,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:10,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:10,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982131075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,083 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T05:41:11,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:11,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:11,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
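[Editor's note] The RegionTooBusyException entries report "Over memstore limit=512.0 K". In HBase that blocking limit is the region flush size multiplied by the block multiplier, so the 512 K figure implies a deliberately small, test-scaled flush size. The sketch below only illustrates those two knobs and the arithmetic; the concrete values are assumptions chosen to reproduce 512 K, not the settings this test actually used (the defaults are 128 MB and 4).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative, test-scale values (assumptions).
    long flushSize = 128L * 1024;  // hbase.hregion.memstore.flush.size, in bytes
    int multiplier = 4;            // hbase.hregion.memstore.block.multiplier
    conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
    conf.setInt("hbase.hregion.memstore.block.multiplier", multiplier);
    // Writes to a region are rejected with RegionTooBusyException once its
    // memstore exceeds flushSize * multiplier (512 KB with the values above).
    long blockingLimit = flushSize * multiplier;
    System.out.println("configured flush size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", -1) + " bytes");
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}
```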
2024-12-12T05:41:11,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982131147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982131151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982131152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982131155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T05:41:11,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/23fffdb49373453fa0f944a1f5b2a78f 2024-12-12T05:41:11,235 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T05:41:11,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/cbd9776c957c4e9b8d1f703ec2e69e5c is 50, key is test_row_0/C:col10/1733982069023/Put/seqid=0 2024-12-12T05:41:11,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:11,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:11,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:11,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:11,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742288_1464 (size=12151) 2024-12-12T05:41:11,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/cbd9776c957c4e9b8d1f703ec2e69e5c 2024-12-12T05:41:11,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/85905f825d8c488183edd74514392658 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/85905f825d8c488183edd74514392658 2024-12-12T05:41:11,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/85905f825d8c488183edd74514392658, entries=200, sequenceid=194, filesize=14.2 K 2024-12-12T05:41:11,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/23fffdb49373453fa0f944a1f5b2a78f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/23fffdb49373453fa0f944a1f5b2a78f 2024-12-12T05:41:11,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/23fffdb49373453fa0f944a1f5b2a78f, entries=150, sequenceid=194, filesize=11.9 K 2024-12-12T05:41:11,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/cbd9776c957c4e9b8d1f703ec2e69e5c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cbd9776c957c4e9b8d1f703ec2e69e5c 2024-12-12T05:41:11,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cbd9776c957c4e9b8d1f703ec2e69e5c, entries=150, sequenceid=194, filesize=11.9 K 2024-12-12T05:41:11,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for ec02d2a34ba48290fed943fe35718728 in 842ms, sequenceid=194, compaction requested=false 2024-12-12T05:41:11,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:11,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:11,388 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:11,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:11,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/9a018a296cde4b7099057aa2fd47d650 is 50, key is test_row_0/A:col10/1733982070465/Put/seqid=0 2024-12-12T05:41:11,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742289_1465 (size=12151) 2024-12-12T05:41:11,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:11,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:11,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982131665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T05:41:11,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982131769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:11,795 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/9a018a296cde4b7099057aa2fd47d650 2024-12-12T05:41:11,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/08075e5a23c34612817515f31c7dcc97 is 50, key is test_row_0/B:col10/1733982070465/Put/seqid=0 2024-12-12T05:41:11,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742290_1466 (size=12151) 2024-12-12T05:41:11,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:11,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982131974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:12,204 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/08075e5a23c34612817515f31c7dcc97 2024-12-12T05:41:12,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/ea6023daaa1b4941bff43f1895c550b1 is 50, key is test_row_0/C:col10/1733982070465/Put/seqid=0 2024-12-12T05:41:12,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742291_1467 (size=12151) 2024-12-12T05:41:12,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982132277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:12,613 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/ea6023daaa1b4941bff43f1895c550b1 2024-12-12T05:41:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/9a018a296cde4b7099057aa2fd47d650 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/9a018a296cde4b7099057aa2fd47d650 2024-12-12T05:41:12,668 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/9a018a296cde4b7099057aa2fd47d650, entries=150, sequenceid=213, filesize=11.9 K 2024-12-12T05:41:12,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/08075e5a23c34612817515f31c7dcc97 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/08075e5a23c34612817515f31c7dcc97 2024-12-12T05:41:12,672 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/08075e5a23c34612817515f31c7dcc97, entries=150, sequenceid=213, filesize=11.9 K 2024-12-12T05:41:12,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/ea6023daaa1b4941bff43f1895c550b1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ea6023daaa1b4941bff43f1895c550b1 2024-12-12T05:41:12,675 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ea6023daaa1b4941bff43f1895c550b1, entries=150, sequenceid=213, filesize=11.9 K 2024-12-12T05:41:12,676 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for ec02d2a34ba48290fed943fe35718728 in 1288ms, sequenceid=213, compaction requested=true 2024-12-12T05:41:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-12T05:41:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-12T05:41:12,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-12T05:41:12,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0500 sec 2024-12-12T05:41:12,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.0520 sec 2024-12-12T05:41:12,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T05:41:12,730 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-12T05:41:12,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:12,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-12T05:41:12,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:12,732 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:12,732 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:12,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:12,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T05:41:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:12,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:12,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/342977bd011945adbb10433ee63fd58f is 50, key is test_row_0/A:col10/1733982072787/Put/seqid=0 2024-12-12T05:41:12,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742292_1468 (size=14541) 2024-12-12T05:41:12,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:12,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:12,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982132850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:12,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:12,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:12,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:12,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:12,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:12,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:12,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:12,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982132955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:13,036 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:13,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:13,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982133158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982133158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982133159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,164 DEBUG [Thread-1896 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:13,164 DEBUG [Thread-1898 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:13,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982133161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,165 DEBUG [Thread-1900 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:13,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982133169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,173 DEBUG [Thread-1904 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:13,188 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:13,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:13,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/342977bd011945adbb10433ee63fd58f 2024-12-12T05:41:13,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/c3d2c4db09294fd9b3620c4657a1e71e is 50, key is test_row_0/B:col10/1733982072787/Put/seqid=0 2024-12-12T05:41:13,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742293_1469 (size=12151) 2024-12-12T05:41:13,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:13,340 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:13,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:13,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982133466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:13,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:13,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/c3d2c4db09294fd9b3620c4657a1e71e 2024-12-12T05:41:13,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3c9b70c63a0a4fb181a5a915d8a540dc is 50, key is test_row_0/C:col10/1733982072787/Put/seqid=0 2024-12-12T05:41:13,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742294_1470 (size=12151) 2024-12-12T05:41:13,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:41:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:13,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:13,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:13,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:13,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:13,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:13,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:13,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:13,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982133969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:14,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3c9b70c63a0a4fb181a5a915d8a540dc 2024-12-12T05:41:14,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/342977bd011945adbb10433ee63fd58f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/342977bd011945adbb10433ee63fd58f 2024-12-12T05:41:14,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/342977bd011945adbb10433ee63fd58f, entries=200, sequenceid=233, filesize=14.2 K 2024-12-12T05:41:14,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/c3d2c4db09294fd9b3620c4657a1e71e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/c3d2c4db09294fd9b3620c4657a1e71e 2024-12-12T05:41:14,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/c3d2c4db09294fd9b3620c4657a1e71e, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T05:41:14,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3c9b70c63a0a4fb181a5a915d8a540dc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3c9b70c63a0a4fb181a5a915d8a540dc 2024-12-12T05:41:14,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3c9b70c63a0a4fb181a5a915d8a540dc, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T05:41:14,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for ec02d2a34ba48290fed943fe35718728 in 1235ms, sequenceid=233, compaction requested=true 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:14,023 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:14,023 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:14,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53794 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:14,024 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
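[Editor's note] The repeated pid=135 failures above come from the region server refusing a remote flush while another flush of the same region is still in progress: the callable throws IOException ("Unable to complete flush"), the master logs the remote failure and re-dispatches, and the loop ends once the in-flight flush (the "Finished flush ... sequenceid=233" entry) completes. A minimal sketch of that reject-and-retry shape, using hypothetical names (RegionFlusher, MasterRetryLoop are illustrative, not HBase classes):

```java
// Illustrative sketch only -- RegionFlusher and MasterRetryLoop are hypothetical
// and not part of the HBase codebase.
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

class RegionFlusher {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    /** Refuse to start a second flush while one is running, mirroring
     *  the "NOT flushing ... as already flushing" log lines. */
    void flushOnce(Runnable doFlush) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            doFlush.run();          // write memstore snapshots out as store files
        } finally {
            flushing.set(false);    // the next remote attempt can now succeed
        }
    }
}

class MasterRetryLoop {
    /** The dispatcher side simply retries the remote call until it succeeds,
     *  which is why the same pid keeps reappearing in the log. */
    static void runWithRetry(RegionFlusher flusher, Runnable doFlush,
                             long backoffMs) throws InterruptedException {
        while (true) {
            try {
                flusher.flushOnce(doFlush);
                return;                       // "Remote procedure done"
            } catch (IOException retryLater) {
                Thread.sleep(backoffMs);      // "Remote procedure failed", re-dispatch
            }
        }
    }
}
```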
2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:14,024 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5cc4f89fa1b143a28c364cb146ad1caf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/85905f825d8c488183edd74514392658, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/9a018a296cde4b7099057aa2fd47d650, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/342977bd011945adbb10433ee63fd58f] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=52.5 K 2024-12-12T05:41:14,024 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:14,024 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5b301a990b3e4a22a1817cff6150abfa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/23fffdb49373453fa0f944a1f5b2a78f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/08075e5a23c34612817515f31c7dcc97, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/c3d2c4db09294fd9b3620c4657a1e71e] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=47.9 K 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cc4f89fa1b143a28c364cb146ad1caf, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733982068397 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b301a990b3e4a22a1817cff6150abfa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733982068397 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85905f825d8c488183edd74514392658, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=194, 
earliestPutTs=1733982069020 2024-12-12T05:41:14,024 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 23fffdb49373453fa0f944a1f5b2a78f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733982069020 2024-12-12T05:41:14,025 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a018a296cde4b7099057aa2fd47d650, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733982070451 2024-12-12T05:41:14,025 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 08075e5a23c34612817515f31c7dcc97, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733982070451 2024-12-12T05:41:14,025 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 342977bd011945adbb10433ee63fd58f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733982071641 2024-12-12T05:41:14,025 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c3d2c4db09294fd9b3620c4657a1e71e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733982071641 2024-12-12T05:41:14,031 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#396 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:14,031 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/6afea58b6de64c1eaa2603c0c1a68d70 is 50, key is test_row_0/A:col10/1733982072787/Put/seqid=0 2024-12-12T05:41:14,031 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:14,032 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/fe084169703f4999a272e9e84ca8adda is 50, key is test_row_0/B:col10/1733982072787/Put/seqid=0 2024-12-12T05:41:14,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742295_1471 (size=12697) 2024-12-12T05:41:14,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742296_1472 (size=12697) 2024-12-12T05:41:14,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:14,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T05:41:14,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:14,102 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T05:41:14,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:14,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:14,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:14,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:14,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:14,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:14,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/735aea51d776443599f8330fb7cae219 is 50, key is test_row_0/A:col10/1733982072839/Put/seqid=0 2024-12-12T05:41:14,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742297_1473 
(size=12151) 2024-12-12T05:41:14,438 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/6afea58b6de64c1eaa2603c0c1a68d70 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/6afea58b6de64c1eaa2603c0c1a68d70 2024-12-12T05:41:14,441 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 6afea58b6de64c1eaa2603c0c1a68d70(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:14,441 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:14,441 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=12, startTime=1733982074023; duration=0sec 2024-12-12T05:41:14,441 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:14,441 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:14,441 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:14,442 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:14,442 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:14,442 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
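[Editor's note] The "Exploring compaction algorithm has selected 4 files of size 49014 ... after considering 3 permutations with 3 in ratio" entries reflect ratio-based minor-compaction selection: contiguous candidate windows are enumerated and a window only qualifies if no single file is larger than the ratio times the combined size of the others. A simplified, hypothetical sketch of that idea (not the actual ExploringCompactionPolicy code, which also weighs file counts, size limits, and off-peak ratios):

```java
import java.util.ArrayList;
import java.util.List;

// Simplified illustration of ratio-based store-file selection.
class RatioSelection {
    static List<Long> select(List<Long> fileSizes, double ratio, int minFiles) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (withinRatio(window, ratio) && total(window) < bestTotal) {
                    best = new ArrayList<>(window);   // this sketch keeps the cheapest valid window
                    bestTotal = total(window);
                }
            }
        }
        return best;
    }

    private static boolean withinRatio(List<Long> window, double ratio) {
        long sum = total(window);
        for (long size : window) {
            if (size > (sum - size) * ratio) {
                return false;   // one file dominates the window; skip it
            }
        }
        return true;
    }

    private static long total(List<Long> window) {
        return window.stream().mapToLong(Long::longValue).sum();
    }
}
```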
2024-12-12T05:41:14,442 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5aff2499b3ea472cb0fb8e9ea19e29d8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cbd9776c957c4e9b8d1f703ec2e69e5c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ea6023daaa1b4941bff43f1895c550b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3c9b70c63a0a4fb181a5a915d8a540dc] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=47.9 K 2024-12-12T05:41:14,442 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5aff2499b3ea472cb0fb8e9ea19e29d8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733982068397 2024-12-12T05:41:14,442 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbd9776c957c4e9b8d1f703ec2e69e5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733982069020 2024-12-12T05:41:14,442 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea6023daaa1b4941bff43f1895c550b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733982070451 2024-12-12T05:41:14,443 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c9b70c63a0a4fb181a5a915d8a540dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733982071641 2024-12-12T05:41:14,443 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/fe084169703f4999a272e9e84ca8adda as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/fe084169703f4999a272e9e84ca8adda 2024-12-12T05:41:14,446 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into fe084169703f4999a272e9e84ca8adda(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
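[Editor's note] Both the flusher and the compactor above first write the new HFile under the region's .tmp directory and only then "commit" it by moving it into the column-family directory, so readers never observe a partially written store file. A minimal sketch of that pattern with the plain HDFS FileSystem API (paths here are placeholders):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-.tmp-then-rename commit seen in the
// "Committing .../.tmp/A/... as .../A/..." log lines.
class TmpThenCommit {
    static Path commit(Configuration conf, Path tmpFile, Path familyDir) throws IOException {
        FileSystem fs = tmpFile.getFileSystem(conf);
        Path dest = new Path(familyDir, tmpFile.getName());
        // Within one HDFS instance the rename is atomic, so the file appears in the
        // store directory fully written or not at all.
        if (!fs.rename(tmpFile, dest)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dest);
        }
        return dest;
    }
}
```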
2024-12-12T05:41:14,446 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:14,446 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=12, startTime=1733982074023; duration=0sec 2024-12-12T05:41:14,446 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:14,446 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:14,448 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:14,448 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/2c516aa4b48c4169a12a0b5d5c486951 is 50, key is test_row_0/C:col10/1733982072787/Put/seqid=0 2024-12-12T05:41:14,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742298_1474 (size=12697) 2024-12-12T05:41:14,510 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/735aea51d776443599f8330fb7cae219 2024-12-12T05:41:14,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/07339740d54c4af8b67467f0cff8f153 is 50, key is test_row_0/B:col10/1733982072839/Put/seqid=0 2024-12-12T05:41:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742299_1475 (size=12151) 2024-12-12T05:41:14,519 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/07339740d54c4af8b67467f0cff8f153 2024-12-12T05:41:14,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/2445cb3b003d4389a78fdcc2ade47f5b is 50, key is test_row_0/C:col10/1733982072839/Put/seqid=0 2024-12-12T05:41:14,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742300_1476 (size=12151) 2024-12-12T05:41:14,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:14,854 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/2c516aa4b48c4169a12a0b5d5c486951 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2c516aa4b48c4169a12a0b5d5c486951 2024-12-12T05:41:14,856 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into 2c516aa4b48c4169a12a0b5d5c486951(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:14,856 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:14,856 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=12, startTime=1733982074023; duration=0sec 2024-12-12T05:41:14,856 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:14,857 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:14,928 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/2445cb3b003d4389a78fdcc2ade47f5b 2024-12-12T05:41:14,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/735aea51d776443599f8330fb7cae219 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/735aea51d776443599f8330fb7cae219 2024-12-12T05:41:14,933 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/735aea51d776443599f8330fb7cae219, entries=150, sequenceid=249, filesize=11.9 K 2024-12-12T05:41:14,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/07339740d54c4af8b67467f0cff8f153 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07339740d54c4af8b67467f0cff8f153 2024-12-12T05:41:14,936 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07339740d54c4af8b67467f0cff8f153, entries=150, sequenceid=249, filesize=11.9 K 2024-12-12T05:41:14,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/2445cb3b003d4389a78fdcc2ade47f5b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2445cb3b003d4389a78fdcc2ade47f5b 2024-12-12T05:41:14,939 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2445cb3b003d4389a78fdcc2ade47f5b, entries=150, sequenceid=249, filesize=11.9 K 2024-12-12T05:41:14,940 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=0 B/0 for ec02d2a34ba48290fed943fe35718728 in 837ms, sequenceid=249, compaction requested=false 2024-12-12T05:41:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
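[Editor's note] The flush for pid=135 follows the same shape for each of the three column families: snapshot the memstore segment, write it to a new file under .tmp, commit it into the family directory, then report the combined result ("Finished flush of dataSize ~87.22 KB ... sequenceid=249"). A condensed, hypothetical outline of that per-family loop (FamilyStore and Snapshot are illustrative stand-ins, not HBase's real classes):

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Illustrative outline of a multi-family memstore flush.
interface Snapshot {
    long dataSize();
}

interface FamilyStore {
    Snapshot snapshotMemstore();                               // freeze current edits
    String writeSnapshotToTmp(Snapshot s) throws IOException;  // e.g. ".tmp/A/<uuid>"
    void commitTmpFile(String tmpPath) throws IOException;     // move into family dir
}

class RegionFlush {
    /** Snapshot every family first so they share one cut-off point, then write and commit. */
    static long flushAll(Map<String, FamilyStore> families) throws IOException {
        Map<String, Snapshot> snaps = new HashMap<>();
        for (Map.Entry<String, FamilyStore> e : families.entrySet()) {
            snaps.put(e.getKey(), e.getValue().snapshotMemstore());
        }
        long flushedBytes = 0;
        for (Map.Entry<String, FamilyStore> e : families.entrySet()) {
            Snapshot snap = snaps.get(e.getKey());
            String tmp = e.getValue().writeSnapshotToTmp(snap);  // new HFile under .tmp
            e.getValue().commitTmpFile(tmp);                     // now visible to readers
            flushedBytes += snap.dataSize();
        }
        return flushedBytes;   // reported as "Finished flush of dataSize ..."
    }
}
```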
2024-12-12T05:41:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-12T05:41:14,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-12T05:41:14,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-12T05:41:14,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2080 sec 2024-12-12T05:41:14,942 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.2110 sec 2024-12-12T05:41:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:14,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:41:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:14,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/2fd27e9e83b1437693c61b36742561a6 is 50, key is test_row_0/A:col10/1733982074987/Put/seqid=0 2024-12-12T05:41:14,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742301_1477 (size=12301) 2024-12-12T05:41:15,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:15,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982135084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:15,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:15,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982135189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:15,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/2fd27e9e83b1437693c61b36742561a6 2024-12-12T05:41:15,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:15,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982135393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:15,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/13108b889e66414d97a449cb5828be40 is 50, key is test_row_0/B:col10/1733982074987/Put/seqid=0 2024-12-12T05:41:15,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742302_1478 (size=12301) 2024-12-12T05:41:15,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982135697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:15,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/13108b889e66414d97a449cb5828be40 2024-12-12T05:41:15,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/f210f605aa544bc9815d2afccae068b2 is 50, key is test_row_0/C:col10/1733982074987/Put/seqid=0 2024-12-12T05:41:15,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742303_1479 (size=12301) 2024-12-12T05:41:15,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/f210f605aa544bc9815d2afccae068b2 2024-12-12T05:41:15,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/2fd27e9e83b1437693c61b36742561a6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/2fd27e9e83b1437693c61b36742561a6 2024-12-12T05:41:15,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/2fd27e9e83b1437693c61b36742561a6, entries=150, sequenceid=263, filesize=12.0 K 2024-12-12T05:41:15,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/13108b889e66414d97a449cb5828be40 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/13108b889e66414d97a449cb5828be40 2024-12-12T05:41:15,818 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/13108b889e66414d97a449cb5828be40, entries=150, sequenceid=263, filesize=12.0 K 2024-12-12T05:41:15,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/f210f605aa544bc9815d2afccae068b2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/f210f605aa544bc9815d2afccae068b2 2024-12-12T05:41:15,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/f210f605aa544bc9815d2afccae068b2, entries=150, sequenceid=263, filesize=12.0 K 2024-12-12T05:41:15,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ec02d2a34ba48290fed943fe35718728 in 835ms, sequenceid=263, compaction requested=true 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:15,822 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:15,822 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:15,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 
ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:15,823 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:15,823 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:15,823 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/6afea58b6de64c1eaa2603c0c1a68d70, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/735aea51d776443599f8330fb7cae219, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/2fd27e9e83b1437693c61b36742561a6] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=36.3 K 2024-12-12T05:41:15,823 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/fe084169703f4999a272e9e84ca8adda, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07339740d54c4af8b67467f0cff8f153, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/13108b889e66414d97a449cb5828be40] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=36.3 K 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting fe084169703f4999a272e9e84ca8adda, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733982071641 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6afea58b6de64c1eaa2603c0c1a68d70, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733982071641 2024-12-12T05:41:15,823 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 735aea51d776443599f8330fb7cae219, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733982072839 2024-12-12T05:41:15,823 DEBUG 
[RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 07339740d54c4af8b67467f0cff8f153, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733982072839 2024-12-12T05:41:15,824 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 13108b889e66414d97a449cb5828be40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733982074979 2024-12-12T05:41:15,824 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fd27e9e83b1437693c61b36742561a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733982074979 2024-12-12T05:41:15,832 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:15,832 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/b7fdcc6b87ee4fa2b98f387a2585b692 is 50, key is test_row_0/B:col10/1733982074987/Put/seqid=0 2024-12-12T05:41:15,834 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#406 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:15,834 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/58830330c4fc41daa8a8c263d5052b5c is 50, key is test_row_0/A:col10/1733982074987/Put/seqid=0 2024-12-12T05:41:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742304_1480 (size=12949) 2024-12-12T05:41:15,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742305_1481 (size=12949) 2024-12-12T05:41:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:16,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T05:41:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:16,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/7eaa38e017c34855a80dcc67663d1039 is 50, key is test_row_0/A:col10/1733982076200/Put/seqid=0 2024-12-12T05:41:16,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742306_1482 (size=14741) 2024-12-12T05:41:16,239 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/b7fdcc6b87ee4fa2b98f387a2585b692 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/b7fdcc6b87ee4fa2b98f387a2585b692 2024-12-12T05:41:16,240 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/58830330c4fc41daa8a8c263d5052b5c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/58830330c4fc41daa8a8c263d5052b5c 2024-12-12T05:41:16,242 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into b7fdcc6b87ee4fa2b98f387a2585b692(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:16,242 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:16,242 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982075822; duration=0sec 2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:16,243 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 58830330c4fc41daa8a8c263d5052b5c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
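Note on the RegionTooBusyException entries above ("Over memstore limit=512.0 K"): they are thrown from HRegion.checkResources, which rejects new writes once a region's memstore exceeds its blocking threshold, i.e. the configured flush size multiplied by the block multiplier; the MemStoreFlusher runs and the compactions that follow are what bring the region back under that limit. The exact settings this test run used are not visible in the log; the sketch below is only one combination of the standard knobs that would produce a 512 K blocking limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical configuration sketch: with a 128 KB flush size and a block
// multiplier of 4, a region starts rejecting writes with RegionTooBusyException
// once its memstore passes 512 KB, matching the "Over memstore limit=512.0 K"
// messages above. Values are illustrative, not read from this run.
public class MemstoreLimitSketch {
  static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush threshold (bytes)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size
    return conf;
  }
}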
2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:16,243 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982075822; duration=0sec 2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:16,243 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:16,244 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:16,244 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:16,244 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2c516aa4b48c4169a12a0b5d5c486951, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2445cb3b003d4389a78fdcc2ade47f5b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/f210f605aa544bc9815d2afccae068b2] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=36.3 K 2024-12-12T05:41:16,244 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c516aa4b48c4169a12a0b5d5c486951, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733982071641 2024-12-12T05:41:16,244 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 2445cb3b003d4389a78fdcc2ade47f5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733982072839 2024-12-12T05:41:16,244 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f210f605aa544bc9815d2afccae068b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733982074979 2024-12-12T05:41:16,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982136240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:16,248 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#408 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:16,249 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3f47bbaff6ef442e86526a02949d0a5e is 50, key is test_row_0/C:col10/1733982074987/Put/seqid=0 2024-12-12T05:41:16,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742307_1483 (size=12949) 2024-12-12T05:41:16,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982136349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:16,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:16,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982136552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:16,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/7eaa38e017c34855a80dcc67663d1039 2024-12-12T05:41:16,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/f093ab89c1d34675b1c15ffb62248b4d is 50, key is test_row_0/B:col10/1733982076200/Put/seqid=0 2024-12-12T05:41:16,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742308_1484 (size=12301) 2024-12-12T05:41:16,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/f093ab89c1d34675b1c15ffb62248b4d 2024-12-12T05:41:16,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/0a0fc3ec788243ebb8637116983dc1b1 is 50, key is test_row_0/C:col10/1733982076200/Put/seqid=0 2024-12-12T05:41:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742309_1485 (size=12301) 2024-12-12T05:41:16,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/0a0fc3ec788243ebb8637116983dc1b1 2024-12-12T05:41:16,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/7eaa38e017c34855a80dcc67663d1039 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7eaa38e017c34855a80dcc67663d1039 2024-12-12T05:41:16,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7eaa38e017c34855a80dcc67663d1039, entries=200, sequenceid=289, filesize=14.4 K 2024-12-12T05:41:16,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/f093ab89c1d34675b1c15ffb62248b4d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f093ab89c1d34675b1c15ffb62248b4d 2024-12-12T05:41:16,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f093ab89c1d34675b1c15ffb62248b4d, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T05:41:16,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/0a0fc3ec788243ebb8637116983dc1b1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0a0fc3ec788243ebb8637116983dc1b1 2024-12-12T05:41:16,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0a0fc3ec788243ebb8637116983dc1b1, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T05:41:16,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ec02d2a34ba48290fed943fe35718728 in 436ms, sequenceid=289, compaction requested=false 2024-12-12T05:41:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:16,663 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3f47bbaff6ef442e86526a02949d0a5e as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3f47bbaff6ef442e86526a02949d0a5e 2024-12-12T05:41:16,666 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into 3f47bbaff6ef442e86526a02949d0a5e(size=12.6 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
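For context, the Mutate calls that keep refilling this memstore are ordinary client Puts against row test_row_0 in the three column families A, B and C of TestAcidGuarantees (the writer side of the ACID-guarantees tool visible in the client stack traces further down). A minimal sketch of such a writer, not the test's actual code, assuming a running cluster and default connection settings:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: one Put touching all three families, the shape of write that
// produces the Mutate calls and memstore growth seen in this log.
public class WriterSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] { "A", "B", "C" }) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      table.put(put); // may be rejected with RegionTooBusyException and retried by the client
    }
  }
}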
2024-12-12T05:41:16,666 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:16,666 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982075822; duration=0sec 2024-12-12T05:41:16,666 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:16,666 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:16,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T05:41:16,836 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-12T05:41:16,836 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:16,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-12T05:41:16,838 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:16,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T05:41:16,838 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:16,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:16,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:16,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:41:16,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:16,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:16,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:16,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:16,860 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:16,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:16,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/f6171c6091e045448de9130f11ecf633 is 50, key is test_row_0/A:col10/1733982076226/Put/seqid=0 2024-12-12T05:41:16,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742310_1486 (size=14741) 2024-12-12T05:41:16,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T05:41:16,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:16,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982136953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:16,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
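The FLUSH procedure bookkeeping above (FlushTableProcedure pid=136 with its FlushRegionProcedure subprocedure pid=137) is the master-side half of an Admin-initiated table flush, the same kind of client call that produced the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed" line. A minimal client-side sketch, assuming an existing Connection:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Sketch only: asks the master to flush every region of the table and waits
// for the resulting flush procedure to finish.
public class FlushSketch {
  static void flushTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}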
2024-12-12T05:41:16,989 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:16,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T05:41:16,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:16,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:16,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:16,990 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:16,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982137057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T05:41:17,142 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T05:41:17,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:17,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
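The "NOT flushing ... as already flushing" / "Unable to complete flush" failures above are the region server declining the remote flush procedure while its own MemStoreFlusher is still busy; the master records the remote procedure as failed and dispatches pid=137 again, as the repeated FlushRegionCallable attempts show. On the write path, meanwhile, each rejected Mutate is retried client-side by RpcRetryingCallerImpl, whose counters ("tries=7, retries=16") are reported a little further down. A hedged sketch of the client knobs that bound that retrying, with assumed values rather than ones taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative values only; they are not read from this log.
public class ClientRetrySketch {
  static Configuration retryConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // upper bound on retries per operation
    conf.setLong("hbase.client.pause", 100);        // base backoff between retries, in ms
    return conf;
  }
}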
2024-12-12T05:41:17,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45434 deadline: 1733982137170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45402 deadline: 1733982137171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,173 DEBUG [Thread-1896 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:17,173 DEBUG [Thread-1898 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:17,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45420 deadline: 1733982137181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,185 DEBUG [Thread-1900 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:17,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45442 deadline: 1733982137207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,209 DEBUG [Thread-1904 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8186 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., hostname=83e80bf221ca,46457,1733981928566, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:17,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982137260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/f6171c6091e045448de9130f11ecf633 2024-12-12T05:41:17,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/ba5be885e2d8413e9ad1b2179a25498d is 50, key is test_row_0/B:col10/1733982076226/Put/seqid=0 2024-12-12T05:41:17,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742311_1487 (size=12301) 2024-12-12T05:41:17,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/ba5be885e2d8413e9ad1b2179a25498d 2024-12-12T05:41:17,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/ac5f6566927942698b3d51db2b52029b is 50, key is test_row_0/C:col10/1733982076226/Put/seqid=0 2024-12-12T05:41:17,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742312_1488 (size=12301) 2024-12-12T05:41:17,294 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T05:41:17,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:17,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
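Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects the Mutate calls because the region's memstore has grown past its blocking limit (512.0 K here). In stock HBase that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the unusually small 512 K value suggests the test lowers the flush size on purpose to force frequent flushes and blocked writers. A minimal sketch of how the limit is derived, assuming an illustrative 128 KB flush size and a multiplier of 4 (neither value is read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: a 128 KB flush size with a multiplier of 4
        // yields the 512 K blocking limit reported by RegionTooBusyException above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier; // writes block above this per-region size
        System.out.println("Blocking memstore limit = " + blockingLimit + " bytes");
      }
    }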
2024-12-12T05:41:17,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T05:41:17,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T05:41:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,447 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
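The pid=137 failures above come from the master's flush procedure racing with the flush that MemStoreFlusher.0 already has in progress: FlushRegionCallable finds the region "already flushing", throws the IOException "Unable to complete flush", and the master re-dispatches the callable a moment later. A minimal sketch, assuming the standard Admin API, of how such a table flush can be requested explicitly (the request in this log is issued by the master's flush procedure, not by client code like this):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the region server(s) to flush the table's memstores.
          // If a flush is already running (as in the log above), the server-side
          // callable reports that it cannot complete and the procedure retries.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }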
2024-12-12T05:41:17,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982137565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,598 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T05:41:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
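The client.RpcRetryingCallerImpl lines (tries=7, retries=16, started=... ms ago) show the client retrying the rejected Mutate with backoff until the region unblocks or the retry budget runs out, at which point the put surfaces an exception to the writer thread. A minimal sketch of the write path the AcidGuaranteesTestTool writer exercises, with the standard client retry settings set explicitly; the values are illustrative and not taken from the test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetriedPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // illustrative; the log shows a budget of 16
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms (illustrative)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is retried internally; an IOException only
          // reaches this caller once the retry budget above is exhausted.
          table.put(put);
        }
      }
    }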
2024-12-12T05:41:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:17,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/ac5f6566927942698b3d51db2b52029b 2024-12-12T05:41:17,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/f6171c6091e045448de9130f11ecf633 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f6171c6091e045448de9130f11ecf633 2024-12-12T05:41:17,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f6171c6091e045448de9130f11ecf633, entries=200, sequenceid=303, filesize=14.4 K 2024-12-12T05:41:17,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/ba5be885e2d8413e9ad1b2179a25498d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/ba5be885e2d8413e9ad1b2179a25498d 2024-12-12T05:41:17,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/ba5be885e2d8413e9ad1b2179a25498d, entries=150, sequenceid=303, filesize=12.0 K 2024-12-12T05:41:17,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/ac5f6566927942698b3d51db2b52029b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ac5f6566927942698b3d51db2b52029b 2024-12-12T05:41:17,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ac5f6566927942698b3d51db2b52029b, entries=150, sequenceid=303, filesize=12.0 K 2024-12-12T05:41:17,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ec02d2a34ba48290fed943fe35718728 in 835ms, sequenceid=303, compaction requested=true 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:17,695 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:17,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:17,695 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:17,696 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:17,696 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42431 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:17,696 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:17,696 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:17,696 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:17,696 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:17,696 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/58830330c4fc41daa8a8c263d5052b5c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7eaa38e017c34855a80dcc67663d1039, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f6171c6091e045448de9130f11ecf633] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=41.4 K 2024-12-12T05:41:17,696 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/b7fdcc6b87ee4fa2b98f387a2585b692, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f093ab89c1d34675b1c15ffb62248b4d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/ba5be885e2d8413e9ad1b2179a25498d] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=36.7 K 2024-12-12T05:41:17,696 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58830330c4fc41daa8a8c263d5052b5c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733982074979 2024-12-12T05:41:17,696 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b7fdcc6b87ee4fa2b98f387a2585b692, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733982074979 2024-12-12T05:41:17,697 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f093ab89c1d34675b1c15ffb62248b4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733982075076 2024-12-12T05:41:17,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7eaa38e017c34855a80dcc67663d1039, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733982075072 2024-12-12T05:41:17,697 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6171c6091e045448de9130f11ecf633, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733982076207 2024-12-12T05:41:17,697 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting ba5be885e2d8413e9ad1b2179a25498d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733982076226 2024-12-12T05:41:17,702 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:17,703 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/59f2e89698af4e0989439329af60da6b is 50, key is test_row_0/B:col10/1733982076226/Put/seqid=0 2024-12-12T05:41:17,706 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:17,706 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/50f5ac9fbe38466bac334b635023d5bb is 50, key is test_row_0/A:col10/1733982076226/Put/seqid=0 2024-12-12T05:41:17,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742313_1489 (size=13051) 2024-12-12T05:41:17,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742314_1490 (size=13051) 2024-12-12T05:41:17,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:17,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:17,751 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:17,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:17,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/ddee4469c3f44bf8b77b1501c8be17a2 is 50, key is test_row_0/A:col10/1733982076920/Put/seqid=0 2024-12-12T05:41:17,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742315_1491 (size=12301) 2024-12-12T05:41:17,758 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/ddee4469c3f44bf8b77b1501c8be17a2 2024-12-12T05:41:17,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/71d1ac6b94a2404cb39a0f4569862122 is 50, key is test_row_0/B:col10/1733982076920/Put/seqid=0 2024-12-12T05:41:17,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742316_1492 (size=12301) 2024-12-12T05:41:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T05:41:18,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:18,075 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:18,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982138106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:18,115 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/59f2e89698af4e0989439329af60da6b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/59f2e89698af4e0989439329af60da6b 2024-12-12T05:41:18,115 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/50f5ac9fbe38466bac334b635023d5bb as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/50f5ac9fbe38466bac334b635023d5bb 2024-12-12T05:41:18,119 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into 59f2e89698af4e0989439329af60da6b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
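[Illustrative aside, not part of the test log.] The repeated RegionTooBusyException rejections above ("Over memstore limit=512.0 K") are the region server applying back-pressure to writers while flushes and compactions catch up. As a hedged sketch only, a client writing to the same table would normally ride over this by giving the standard HBase client retry settings a larger budget; the retry values below are assumptions, while the table, row, family, and qualifier names are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TolerantWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Standard client retry knobs; these values are illustrative assumptions.
            // RegionTooBusyException is retriable, so a larger budget lets the client
            // wait out the "Over memstore limit" back-pressure seen in the log.
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 100); // base pause between retries, in ms

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                // If the retry budget is exhausted, the put surfaces an IOException with
                // the region-busy condition as the cause; the application can then back
                // off and retry on its own schedule.
                table.put(put);
            }
        }
    }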
2024-12-12T05:41:18,119 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 50f5ac9fbe38466bac334b635023d5bb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:18,119 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982077695; duration=0sec 2024-12-12T05:41:18,119 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982077695; duration=0sec 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:18,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:18,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:18,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:18,120 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:18,120 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3f47bbaff6ef442e86526a02949d0a5e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0a0fc3ec788243ebb8637116983dc1b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ac5f6566927942698b3d51db2b52029b] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=36.7 K 2024-12-12T05:41:18,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f47bbaff6ef442e86526a02949d0a5e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733982074979 2024-12-12T05:41:18,121 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a0fc3ec788243ebb8637116983dc1b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733982075076 2024-12-12T05:41:18,121 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac5f6566927942698b3d51db2b52029b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733982076226 2024-12-12T05:41:18,126 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#418 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:18,126 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/a13840853f9a418b94ffbad2e9e0c95d is 50, key is test_row_0/C:col10/1733982076226/Put/seqid=0 2024-12-12T05:41:18,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742317_1493 (size=13051) 2024-12-12T05:41:18,167 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/71d1ac6b94a2404cb39a0f4569862122 2024-12-12T05:41:18,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/0b744c1f14ad40e0a0d40bbae306d95a is 50, key is test_row_0/C:col10/1733982076920/Put/seqid=0 2024-12-12T05:41:18,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742318_1494 (size=12301) 2024-12-12T05:41:18,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:18,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982138211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:18,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:18,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982138415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:18,532 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/a13840853f9a418b94ffbad2e9e0c95d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/a13840853f9a418b94ffbad2e9e0c95d 2024-12-12T05:41:18,535 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into a13840853f9a418b94ffbad2e9e0c95d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:18,535 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:18,535 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982077695; duration=0sec 2024-12-12T05:41:18,535 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:18,535 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:18,576 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/0b744c1f14ad40e0a0d40bbae306d95a 2024-12-12T05:41:18,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/ddee4469c3f44bf8b77b1501c8be17a2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/ddee4469c3f44bf8b77b1501c8be17a2 2024-12-12T05:41:18,581 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/ddee4469c3f44bf8b77b1501c8be17a2, entries=150, sequenceid=328, filesize=12.0 K 2024-12-12T05:41:18,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/71d1ac6b94a2404cb39a0f4569862122 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/71d1ac6b94a2404cb39a0f4569862122 2024-12-12T05:41:18,584 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/71d1ac6b94a2404cb39a0f4569862122, entries=150, sequenceid=328, filesize=12.0 K 2024-12-12T05:41:18,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/0b744c1f14ad40e0a0d40bbae306d95a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0b744c1f14ad40e0a0d40bbae306d95a 2024-12-12T05:41:18,588 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0b744c1f14ad40e0a0d40bbae306d95a, entries=150, sequenceid=328, filesize=12.0 K 2024-12-12T05:41:18,588 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ec02d2a34ba48290fed943fe35718728 in 837ms, sequenceid=328, compaction requested=false 2024-12-12T05:41:18,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:18,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:18,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-12T05:41:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-12T05:41:18,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-12T05:41:18,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7510 sec 2024-12-12T05:41:18,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7540 sec 2024-12-12T05:41:18,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:18,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:41:18,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:18,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:18,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:18,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:18,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:18,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:18,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/7b3db271a0ec4c6fa7fdd86edb6e47aa is 50, key is test_row_0/A:col10/1733982078719/Put/seqid=0 2024-12-12T05:41:18,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742319_1495 (size=12297) 2024-12-12T05:41:18,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:18,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982138789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:18,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982138894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T05:41:18,941 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-12T05:41:18,942 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-12T05:41:18,943 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:18,943 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:18,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:19,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:19,094 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T05:41:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
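[Illustrative aside, not part of the test log.] The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request and the FlushTableProcedure/FlushRegionProcedure pair above (pid=138/139, mirroring pid=136/137 earlier) correspond to an administrative table flush; the client waits on the procedure, which is why HBaseAdmin$TableFuture later reports the operation completed. A minimal sketch of how such a flush is normally issued, assuming a standard HBase 2.x client setup (the connection boilerplate is an assumption, only the table name comes from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Requests a flush of every region of the table and waits for the result;
                // on the master this appears as a FlushTableProcedure with one
                // FlushRegionProcedure subprocedure per region, as in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }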
2024-12-12T05:41:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982139097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/7b3db271a0ec4c6fa7fdd86edb6e47aa 2024-12-12T05:41:19,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/dfe72559c269471fbc59e0a9ab8a9ef3 is 50, key is test_row_0/B:col10/1733982078719/Put/seqid=0 2024-12-12T05:41:19,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742320_1496 (size=9857) 2024-12-12T05:41:19,224 DEBUG [Thread-1913 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c692575 to 127.0.0.1:60303 2024-12-12T05:41:19,224 DEBUG [Thread-1909 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x184771cf to 127.0.0.1:60303 2024-12-12T05:41:19,224 DEBUG [Thread-1913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:19,224 DEBUG [Thread-1909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:19,226 DEBUG [Thread-1907 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:60303 2024-12-12T05:41:19,226 DEBUG [Thread-1907 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:19,228 DEBUG [Thread-1915 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1cbd2497 to 127.0.0.1:60303 2024-12-12T05:41:19,228 DEBUG [Thread-1915 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:19,228 DEBUG [Thread-1911 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x076f0408 to 127.0.0.1:60303 2024-12-12T05:41:19,228 DEBUG [Thread-1911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:19,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:19,246 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=139 2024-12-12T05:41:19,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:19,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:19,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T05:41:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:41:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:19,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982139404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/dfe72559c269471fbc59e0a9ab8a9ef3 2024-12-12T05:41:19,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:19,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/1fb7eaa286424546b6076ec5a16ec77c is 50, key is test_row_0/C:col10/1733982078719/Put/seqid=0 2024-12-12T05:41:19,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742321_1497 (size=9857) 2024-12-12T05:41:19,555 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T05:41:19,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:19,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:19,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T05:41:19,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:19,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T05:41:19,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. as already flushing 2024-12-12T05:41:19,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:19,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:19,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45418 deadline: 1733982139911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:19,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/1fb7eaa286424546b6076ec5a16ec77c 2024-12-12T05:41:19,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/7b3db271a0ec4c6fa7fdd86edb6e47aa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7b3db271a0ec4c6fa7fdd86edb6e47aa 2024-12-12T05:41:19,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7b3db271a0ec4c6fa7fdd86edb6e47aa, entries=150, sequenceid=343, filesize=12.0 K 2024-12-12T05:41:19,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/dfe72559c269471fbc59e0a9ab8a9ef3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/dfe72559c269471fbc59e0a9ab8a9ef3 2024-12-12T05:41:19,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/dfe72559c269471fbc59e0a9ab8a9ef3, entries=100, sequenceid=343, filesize=9.6 K 2024-12-12T05:41:19,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/1fb7eaa286424546b6076ec5a16ec77c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/1fb7eaa286424546b6076ec5a16ec77c 2024-12-12T05:41:19,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/1fb7eaa286424546b6076ec5a16ec77c, entries=100, sequenceid=343, filesize=9.6 K 2024-12-12T05:41:19,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ec02d2a34ba48290fed943fe35718728 in 1258ms, sequenceid=343, compaction requested=true 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:19,979 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:19,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec02d2a34ba48290fed943fe35718728:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:19,979 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37649 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:19,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:19,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/B is initiating minor compaction (all files) 2024-12-12T05:41:19,979 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/A is initiating minor compaction (all files) 2024-12-12T05:41:19,980 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/A in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
2024-12-12T05:41:19,980 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/B in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,980 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/50f5ac9fbe38466bac334b635023d5bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/ddee4469c3f44bf8b77b1501c8be17a2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7b3db271a0ec4c6fa7fdd86edb6e47aa] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=36.8 K 2024-12-12T05:41:19,980 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/59f2e89698af4e0989439329af60da6b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/71d1ac6b94a2404cb39a0f4569862122, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/dfe72559c269471fbc59e0a9ab8a9ef3] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=34.4 K 2024-12-12T05:41:19,980 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50f5ac9fbe38466bac334b635023d5bb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733982076226 2024-12-12T05:41:19,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 59f2e89698af4e0989439329af60da6b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733982076226 2024-12-12T05:41:19,980 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddee4469c3f44bf8b77b1501c8be17a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733982076920 2024-12-12T05:41:19,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 71d1ac6b94a2404cb39a0f4569862122, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733982076920 2024-12-12T05:41:19,980 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b3db271a0ec4c6fa7fdd86edb6e47aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733982078102 2024-12-12T05:41:19,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting dfe72559c269471fbc59e0a9ab8a9ef3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733982078102 
2024-12-12T05:41:19,985 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#B#compaction#423 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:19,985 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#A#compaction#424 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:19,986 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/60b7d9489228422aa1a1e59a11780d94 is 50, key is test_row_0/A:col10/1733982078719/Put/seqid=0 2024-12-12T05:41:19,986 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/a2f2e8fc1d504ac0bda4b826d5f24c2a is 50, key is test_row_0/B:col10/1733982078719/Put/seqid=0 2024-12-12T05:41:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742323_1499 (size=13153) 2024-12-12T05:41:19,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742322_1498 (size=13153) 2024-12-12T05:41:19,994 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/60b7d9489228422aa1a1e59a11780d94 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/60b7d9489228422aa1a1e59a11780d94 2024-12-12T05:41:19,997 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/A of ec02d2a34ba48290fed943fe35718728 into 60b7d9489228422aa1a1e59a11780d94(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:19,997 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:19,997 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/A, priority=13, startTime=1733982079979; duration=0sec 2024-12-12T05:41:19,997 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:19,997 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:A 2024-12-12T05:41:19,997 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:19,997 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:19,997 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): ec02d2a34ba48290fed943fe35718728/C is initiating minor compaction (all files) 2024-12-12T05:41:19,998 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ec02d2a34ba48290fed943fe35718728/C in TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:19,998 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/a13840853f9a418b94ffbad2e9e0c95d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0b744c1f14ad40e0a0d40bbae306d95a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/1fb7eaa286424546b6076ec5a16ec77c] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp, totalSize=34.4 K 2024-12-12T05:41:19,998 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting a13840853f9a418b94ffbad2e9e0c95d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733982076226 2024-12-12T05:41:19,998 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b744c1f14ad40e0a0d40bbae306d95a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733982076920 2024-12-12T05:41:19,998 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fb7eaa286424546b6076ec5a16ec77c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733982078102 2024-12-12T05:41:20,004 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ec02d2a34ba48290fed943fe35718728#C#compaction#425 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:20,004 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/cfb8ad83cb1847689f55da73f41f6b02 is 50, key is test_row_0/C:col10/1733982078719/Put/seqid=0 2024-12-12T05:41:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742324_1500 (size=13153) 2024-12-12T05:41:20,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:20,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:20,018 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:20,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:20,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/a153c9728b72462e985c612c426554df is 50, key is test_row_0/A:col10/1733982078788/Put/seqid=0 2024-12-12T05:41:20,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to 
blk_1073742325_1501 (size=12301) 2024-12-12T05:41:20,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:20,400 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/a2f2e8fc1d504ac0bda4b826d5f24c2a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/a2f2e8fc1d504ac0bda4b826d5f24c2a 2024-12-12T05:41:20,405 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/B of ec02d2a34ba48290fed943fe35718728 into a2f2e8fc1d504ac0bda4b826d5f24c2a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:20,405 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:20,405 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/B, priority=13, startTime=1733982079979; duration=0sec 2024-12-12T05:41:20,405 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:20,405 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:B 2024-12-12T05:41:20,414 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/cfb8ad83cb1847689f55da73f41f6b02 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cfb8ad83cb1847689f55da73f41f6b02 2024-12-12T05:41:20,420 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ec02d2a34ba48290fed943fe35718728/C of ec02d2a34ba48290fed943fe35718728 into cfb8ad83cb1847689f55da73f41f6b02(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:20,420 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:20,420 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728., storeName=ec02d2a34ba48290fed943fe35718728/C, priority=13, startTime=1733982079979; duration=0sec 2024-12-12T05:41:20,420 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:20,420 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec02d2a34ba48290fed943fe35718728:C 2024-12-12T05:41:20,425 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/a153c9728b72462e985c612c426554df 2024-12-12T05:41:20,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/87ff110865ef47ecbba640c386473675 is 50, key is test_row_0/B:col10/1733982078788/Put/seqid=0 2024-12-12T05:41:20,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742326_1502 (size=12301) 2024-12-12T05:41:20,839 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/87ff110865ef47ecbba640c386473675 2024-12-12T05:41:20,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3d3e11ef742e420591c79cf870a1c0f0 is 50, key is test_row_0/C:col10/1733982078788/Put/seqid=0 2024-12-12T05:41:20,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742327_1503 (size=12301) 2024-12-12T05:41:20,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:20,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
as already flushing 2024-12-12T05:41:20,918 DEBUG [Thread-1902 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:60303 2024-12-12T05:41:20,918 DEBUG [Thread-1902 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:21,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:21,257 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3d3e11ef742e420591c79cf870a1c0f0 2024-12-12T05:41:21,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/a153c9728b72462e985c612c426554df as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/a153c9728b72462e985c612c426554df 2024-12-12T05:41:21,265 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/a153c9728b72462e985c612c426554df, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T05:41:21,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/87ff110865ef47ecbba640c386473675 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/87ff110865ef47ecbba640c386473675 2024-12-12T05:41:21,270 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/87ff110865ef47ecbba640c386473675, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T05:41:21,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/3d3e11ef742e420591c79cf870a1c0f0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3d3e11ef742e420591c79cf870a1c0f0 2024-12-12T05:41:21,276 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3d3e11ef742e420591c79cf870a1c0f0, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T05:41:21,276 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=6.71 KB/6870 for ec02d2a34ba48290fed943fe35718728 in 1258ms, sequenceid=368, compaction requested=false 2024-12-12T05:41:21,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:21,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:21,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-12T05:41:21,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-12T05:41:21,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-12T05:41:21,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3350 sec 2024-12-12T05:41:21,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.3380 sec 2024-12-12T05:41:23,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T05:41:23,051 INFO [Thread-1906 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-12T05:41:27,219 DEBUG [Thread-1904 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:60303 2024-12-12T05:41:27,219 DEBUG [Thread-1904 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:27,220 DEBUG [Thread-1900 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34b30c39 to 127.0.0.1:60303 2024-12-12T05:41:27,221 DEBUG [Thread-1900 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:27,236 DEBUG [Thread-1896 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7043f683 to 127.0.0.1:60303 2024-12-12T05:41:27,236 DEBUG [Thread-1896 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:27,258 DEBUG [Thread-1898 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b0c2472 to 127.0.0.1:60303 2024-12-12T05:41:27,258 DEBUG [Thread-1898 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:27,258 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T05:41:27,258 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-12-12T05:41:27,258 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 160 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3409 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10227 rows 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3384 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10152 rows 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3397 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10191 rows 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3395 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10185 rows 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3386 2024-12-12T05:41:27,259 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10158 rows 2024-12-12T05:41:27,259 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:41:27,259 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61d38088 to 127.0.0.1:60303 2024-12-12T05:41:27,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:27,262 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T05:41:27,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T05:41:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T05:41:27,267 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982087267"}]},"ts":"1733982087267"} 2024-12-12T05:41:27,268 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T05:41:27,284 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T05:41:27,285 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:41:27,286 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, UNASSIGN}] 2024-12-12T05:41:27,287 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, UNASSIGN 2024-12-12T05:41:27,287 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=ec02d2a34ba48290fed943fe35718728, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:27,288 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:41:27,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; CloseRegionProcedure ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:41:27,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T05:41:27,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:27,441 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(124): Close ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:27,441 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:41:27,442 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1681): Closing ec02d2a34ba48290fed943fe35718728, disabling compactions & flushes 2024-12-12T05:41:27,442 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:27,442 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 2024-12-12T05:41:27,442 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. after waiting 0 ms 2024-12-12T05:41:27,442 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
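The disable sequence above fans out from DisableTableProcedure (pid=140) into CloseTableRegionsProcedure, an UNASSIGN TransitRegionStateProcedure, and finally a CloseRegionProcedure on the hosting region server, which takes the close lock and disables compactions and flushes before the region goes offline (the final memstore flush follows below). A minimal, hypothetical client-side call that triggers this chain might look as follows; it is a sketch only, not the test's own code, and the class name is invented:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (!admin.isTableDisabled(table)) {
            // disableTable() blocks until the master-side procedure (pid=140 here) and its
            // close/unassign subprocedures finish; hbase:meta shows state=DISABLING meanwhile,
            // exactly as in the MetaTableAccessor entries above.
            admin.disableTable(table);
          }
        }
      }
    }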
2024-12-12T05:41:27,442 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(2837): Flushing ec02d2a34ba48290fed943fe35718728 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T05:41:27,443 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=A 2024-12-12T05:41:27,443 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:27,443 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=B 2024-12-12T05:41:27,443 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:27,443 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ec02d2a34ba48290fed943fe35718728, store=C 2024-12-12T05:41:27,443 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:27,450 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/e9eca341220042caa0f9533918497f36 is 50, key is test_row_0/A:col10/1733982087255/Put/seqid=0 2024-12-12T05:41:27,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742328_1504 (size=12301) 2024-12-12T05:41:27,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T05:41:27,857 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/e9eca341220042caa0f9533918497f36 2024-12-12T05:41:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T05:41:27,871 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/649075113f134e02840c07f72ded8dc2 is 50, key is test_row_0/B:col10/1733982087255/Put/seqid=0 2024-12-12T05:41:27,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742329_1505 (size=12301) 2024-12-12T05:41:28,276 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 
{event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/649075113f134e02840c07f72ded8dc2 2024-12-12T05:41:28,287 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/958ef9c7a753403eb48fa65706bce4d4 is 50, key is test_row_0/C:col10/1733982087255/Put/seqid=0 2024-12-12T05:41:28,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742330_1506 (size=12301) 2024-12-12T05:41:28,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T05:41:28,691 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/958ef9c7a753403eb48fa65706bce4d4 2024-12-12T05:41:28,696 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/A/e9eca341220042caa0f9533918497f36 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/e9eca341220042caa0f9533918497f36 2024-12-12T05:41:28,701 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/e9eca341220042caa0f9533918497f36, entries=150, sequenceid=378, filesize=12.0 K 2024-12-12T05:41:28,702 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/B/649075113f134e02840c07f72ded8dc2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/649075113f134e02840c07f72ded8dc2 2024-12-12T05:41:28,705 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/649075113f134e02840c07f72ded8dc2, entries=150, sequenceid=378, filesize=12.0 K 2024-12-12T05:41:28,706 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/.tmp/C/958ef9c7a753403eb48fa65706bce4d4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/958ef9c7a753403eb48fa65706bce4d4 2024-12-12T05:41:28,709 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/958ef9c7a753403eb48fa65706bce4d4, entries=150, sequenceid=378, filesize=12.0 K 2024-12-12T05:41:28,710 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for ec02d2a34ba48290fed943fe35718728 in 1268ms, sequenceid=378, compaction requested=true 2024-12-12T05:41:28,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/4f213a1f835e4c9ca734fb750c000d6e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/d3ffab59d01c4f6f9da8fe8d4c24c05a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/0065d168bc1c4731bd01a4201da0e950, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5c32f838800645e08963df498ed20f4b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/02d594fac7e74b698269c3b9c95cc748, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/eff4b95551b3490dad790ea80f68486a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/3bf15963ed9d44f7a57e253e8d295381, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/213a3ab9a9d040e5ac993bcac23fbc64, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/607cba0a106d4081958d05592486dcbd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f66da54f255d472ab00e38e9731c6ab7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/80e40da0edc241a4923ee516bc4cca7f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/b549b62a90b24add90ae2e94f45d2a89, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5cc4f89fa1b143a28c364cb146ad1caf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/85905f825d8c488183edd74514392658, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/9a018a296cde4b7099057aa2fd47d650, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/342977bd011945adbb10433ee63fd58f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/6afea58b6de64c1eaa2603c0c1a68d70, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/735aea51d776443599f8330fb7cae219, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/58830330c4fc41daa8a8c263d5052b5c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/2fd27e9e83b1437693c61b36742561a6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7eaa38e017c34855a80dcc67663d1039, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f6171c6091e045448de9130f11ecf633, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/50f5ac9fbe38466bac334b635023d5bb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/ddee4469c3f44bf8b77b1501c8be17a2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7b3db271a0ec4c6fa7fdd86edb6e47aa] to archive 2024-12-12T05:41:28,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
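The StoreCloser entries that follow move every compacted store file for family A from the region's data directory to the mirrored location under archive/: the directory layout is preserved and only the .../data/... prefix becomes .../archive/data/.... The sketch below illustrates that path mapping with the plain Hadoop FileSystem API; it is an assumption-laden stand-in, not the actual HFileArchiver implementation (which also handles name collisions, retries, and bulk moves), and the sample path is one of the files listed above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileSketch {
      // Illustrative only: map a store file under <root>/data/... to the mirrored
      // <root>/archive/data/... location and move it there.
      static void archive(FileSystem fs, Path rootDir, Path storeFile) throws Exception {
        String dataRoot = new Path(rootDir, "data").toUri().getPath();
        String relative = storeFile.toUri().getPath().substring(dataRoot.length() + 1);
        Path target = new Path(new Path(rootDir, "archive/data"), relative);
        fs.mkdirs(target.getParent());          // create archive/<ns>/<table>/<region>/<family>
        if (!fs.rename(storeFile, target)) {    // HFileArchiver additionally handles clashes
          throw new java.io.IOException("Failed to archive " + storeFile);
        }
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            java.net.URI.create("hdfs://localhost:45813"), new Configuration());
        Path root = new Path("/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d");
        archive(fs, root, new Path(root,
            "data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728"
            + "/A/4f213a1f835e4c9ca734fb750c000d6e"));
      }
    }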
2024-12-12T05:41:28,714 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/4f213a1f835e4c9ca734fb750c000d6e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/4f213a1f835e4c9ca734fb750c000d6e 2024-12-12T05:41:28,714 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/d3ffab59d01c4f6f9da8fe8d4c24c05a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/d3ffab59d01c4f6f9da8fe8d4c24c05a 2024-12-12T05:41:28,714 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/0065d168bc1c4731bd01a4201da0e950 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/0065d168bc1c4731bd01a4201da0e950 2024-12-12T05:41:28,715 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5c32f838800645e08963df498ed20f4b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5c32f838800645e08963df498ed20f4b 2024-12-12T05:41:28,715 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/02d594fac7e74b698269c3b9c95cc748 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/02d594fac7e74b698269c3b9c95cc748 2024-12-12T05:41:28,715 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/3bf15963ed9d44f7a57e253e8d295381 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/3bf15963ed9d44f7a57e253e8d295381 2024-12-12T05:41:28,715 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/213a3ab9a9d040e5ac993bcac23fbc64 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/213a3ab9a9d040e5ac993bcac23fbc64 2024-12-12T05:41:28,716 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/80e40da0edc241a4923ee516bc4cca7f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/80e40da0edc241a4923ee516bc4cca7f 2024-12-12T05:41:28,716 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/b549b62a90b24add90ae2e94f45d2a89 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/b549b62a90b24add90ae2e94f45d2a89 2024-12-12T05:41:28,716 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/607cba0a106d4081958d05592486dcbd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/607cba0a106d4081958d05592486dcbd 2024-12-12T05:41:28,716 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/eff4b95551b3490dad790ea80f68486a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/eff4b95551b3490dad790ea80f68486a 2024-12-12T05:41:28,716 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f66da54f255d472ab00e38e9731c6ab7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f66da54f255d472ab00e38e9731c6ab7 2024-12-12T05:41:28,717 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5cc4f89fa1b143a28c364cb146ad1caf to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/5cc4f89fa1b143a28c364cb146ad1caf 2024-12-12T05:41:28,717 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/85905f825d8c488183edd74514392658 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/85905f825d8c488183edd74514392658 2024-12-12T05:41:28,717 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/9a018a296cde4b7099057aa2fd47d650 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/9a018a296cde4b7099057aa2fd47d650 2024-12-12T05:41:28,718 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/342977bd011945adbb10433ee63fd58f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/342977bd011945adbb10433ee63fd58f 2024-12-12T05:41:28,718 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/735aea51d776443599f8330fb7cae219 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/735aea51d776443599f8330fb7cae219 2024-12-12T05:41:28,718 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/58830330c4fc41daa8a8c263d5052b5c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/58830330c4fc41daa8a8c263d5052b5c 2024-12-12T05:41:28,718 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/6afea58b6de64c1eaa2603c0c1a68d70 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/6afea58b6de64c1eaa2603c0c1a68d70 2024-12-12T05:41:28,718 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/2fd27e9e83b1437693c61b36742561a6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/2fd27e9e83b1437693c61b36742561a6 2024-12-12T05:41:28,719 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7eaa38e017c34855a80dcc67663d1039 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7eaa38e017c34855a80dcc67663d1039 2024-12-12T05:41:28,719 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f6171c6091e045448de9130f11ecf633 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/f6171c6091e045448de9130f11ecf633 2024-12-12T05:41:28,719 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/50f5ac9fbe38466bac334b635023d5bb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/50f5ac9fbe38466bac334b635023d5bb 2024-12-12T05:41:28,719 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/ddee4469c3f44bf8b77b1501c8be17a2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/ddee4469c3f44bf8b77b1501c8be17a2 2024-12-12T05:41:28,719 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7b3db271a0ec4c6fa7fdd86edb6e47aa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/7b3db271a0ec4c6fa7fdd86edb6e47aa 2024-12-12T05:41:28,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/75617546cd6442838b579b290e9af670, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/756eb8a0ece64eba8803b3bfb36fe4ba, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/88c67e94385a403585ea73648fa4f49e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/192fb585f3d24cf3a17ddd2e555ffff4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07f461a4ddcd49799f12185b085846aa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/beaea3d0350b49a093ada34ce94ad7a4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/95eb5135d1cc4100b54c44901f419019, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f9755624a5dd40fc9538cf76d34ece74, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/7eed6bf1467d425ca6fcf60e48554fe1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5e89dc537461469b9790dfc154f7414c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/2f112711faf0474d89b89cd47ca26389, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5b301a990b3e4a22a1817cff6150abfa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/0b0ad5b753714f31b1cd89dfa4b13c5a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/23fffdb49373453fa0f944a1f5b2a78f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/08075e5a23c34612817515f31c7dcc97, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/fe084169703f4999a272e9e84ca8adda, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/c3d2c4db09294fd9b3620c4657a1e71e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07339740d54c4af8b67467f0cff8f153, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/b7fdcc6b87ee4fa2b98f387a2585b692, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/13108b889e66414d97a449cb5828be40, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f093ab89c1d34675b1c15ffb62248b4d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/59f2e89698af4e0989439329af60da6b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/ba5be885e2d8413e9ad1b2179a25498d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/71d1ac6b94a2404cb39a0f4569862122, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/dfe72559c269471fbc59e0a9ab8a9ef3] to archive 2024-12-12T05:41:28,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
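Family B's compacted files go through the same data/ to archive/data/ move in the entries that follow. As a convenience check (hypothetical, not part of the test), one could list the mirrored family directory afterwards and compare the file count against the "Moving the files [...]" list above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CountArchivedFiles {
      public static void main(String[] args) throws Exception {
        // Archive layout taken from the log above; namenode address as logged.
        FileSystem fs = FileSystem.get(
            java.net.URI.create("hdfs://localhost:45813"), new Configuration());
        Path archivedFamily = new Path(
            "/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d"
            + "/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B");
        FileStatus[] files = fs.listStatus(archivedFamily);
        System.out.println("archived B store files: " + files.length);
        for (FileStatus f : files) {
          System.out.println("  " + f.getPath().getName() + " (" + f.getLen() + " bytes)");
        }
      }
    }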
2024-12-12T05:41:28,723 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/75617546cd6442838b579b290e9af670 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/75617546cd6442838b579b290e9af670 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/756eb8a0ece64eba8803b3bfb36fe4ba to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/756eb8a0ece64eba8803b3bfb36fe4ba 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/192fb585f3d24cf3a17ddd2e555ffff4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/192fb585f3d24cf3a17ddd2e555ffff4 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/88c67e94385a403585ea73648fa4f49e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/88c67e94385a403585ea73648fa4f49e 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/beaea3d0350b49a093ada34ce94ad7a4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/beaea3d0350b49a093ada34ce94ad7a4 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07f461a4ddcd49799f12185b085846aa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07f461a4ddcd49799f12185b085846aa 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f9755624a5dd40fc9538cf76d34ece74 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f9755624a5dd40fc9538cf76d34ece74 2024-12-12T05:41:28,723 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/95eb5135d1cc4100b54c44901f419019 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/95eb5135d1cc4100b54c44901f419019 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/7eed6bf1467d425ca6fcf60e48554fe1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/7eed6bf1467d425ca6fcf60e48554fe1 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5e89dc537461469b9790dfc154f7414c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5e89dc537461469b9790dfc154f7414c 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5b301a990b3e4a22a1817cff6150abfa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/5b301a990b3e4a22a1817cff6150abfa 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/08075e5a23c34612817515f31c7dcc97 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/08075e5a23c34612817515f31c7dcc97 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/2f112711faf0474d89b89cd47ca26389 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/2f112711faf0474d89b89cd47ca26389 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/23fffdb49373453fa0f944a1f5b2a78f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/23fffdb49373453fa0f944a1f5b2a78f 2024-12-12T05:41:28,724 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/fe084169703f4999a272e9e84ca8adda to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/fe084169703f4999a272e9e84ca8adda 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/0b0ad5b753714f31b1cd89dfa4b13c5a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/0b0ad5b753714f31b1cd89dfa4b13c5a 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/c3d2c4db09294fd9b3620c4657a1e71e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/c3d2c4db09294fd9b3620c4657a1e71e 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07339740d54c4af8b67467f0cff8f153 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/07339740d54c4af8b67467f0cff8f153 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/13108b889e66414d97a449cb5828be40 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/13108b889e66414d97a449cb5828be40 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f093ab89c1d34675b1c15ffb62248b4d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/f093ab89c1d34675b1c15ffb62248b4d 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/59f2e89698af4e0989439329af60da6b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/59f2e89698af4e0989439329af60da6b 2024-12-12T05:41:28,725 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/b7fdcc6b87ee4fa2b98f387a2585b692 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/b7fdcc6b87ee4fa2b98f387a2585b692 2024-12-12T05:41:28,726 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/ba5be885e2d8413e9ad1b2179a25498d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/ba5be885e2d8413e9ad1b2179a25498d 2024-12-12T05:41:28,726 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/71d1ac6b94a2404cb39a0f4569862122 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/71d1ac6b94a2404cb39a0f4569862122 2024-12-12T05:41:28,726 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/dfe72559c269471fbc59e0a9ab8a9ef3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/dfe72559c269471fbc59e0a9ab8a9ef3 2024-12-12T05:41:28,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/38f88709476446da803163a441274b72, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5e0f7b8e4ce14bb9851dffc230da5781, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d86e13ac010b4e37896812465711ae61, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3038721b9d4f47ffa9422eeb13c25fb0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/af33e9ff810f484fbfe3672d3fe5cae7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/eb6af930fed4468a8540bee85a41a511, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/10415e1317fc420eb54f6bbbd223c5ab, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/13980602b6ed42439667eb86b1e1c2eb, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d7b58396fd324020907c1572a0749cfd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/00b9d7aba66049e7874dcdb402d4dcc9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5aff2499b3ea472cb0fb8e9ea19e29d8, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/78cb4438e6534888b31a712e31db8ff0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cbd9776c957c4e9b8d1f703ec2e69e5c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ea6023daaa1b4941bff43f1895c550b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2c516aa4b48c4169a12a0b5d5c486951, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3c9b70c63a0a4fb181a5a915d8a540dc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2445cb3b003d4389a78fdcc2ade47f5b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3f47bbaff6ef442e86526a02949d0a5e, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/f210f605aa544bc9815d2afccae068b2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0a0fc3ec788243ebb8637116983dc1b1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/a13840853f9a418b94ffbad2e9e0c95d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ac5f6566927942698b3d51db2b52029b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0b744c1f14ad40e0a0d40bbae306d95a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/1fb7eaa286424546b6076ec5a16ec77c] to archive 2024-12-12T05:41:28,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T05:41:28,728 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555 2024-12-12T05:41:28,728 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/38f88709476446da803163a441274b72 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/38f88709476446da803163a441274b72 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5e0f7b8e4ce14bb9851dffc230da5781 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5e0f7b8e4ce14bb9851dffc230da5781 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d86e13ac010b4e37896812465711ae61 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d86e13ac010b4e37896812465711ae61 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/10415e1317fc420eb54f6bbbd223c5ab to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/10415e1317fc420eb54f6bbbd223c5ab 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3038721b9d4f47ffa9422eeb13c25fb0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3038721b9d4f47ffa9422eeb13c25fb0 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/af33e9ff810f484fbfe3672d3fe5cae7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/af33e9ff810f484fbfe3672d3fe5cae7 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/eb6af930fed4468a8540bee85a41a511 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/eb6af930fed4468a8540bee85a41a511 2024-12-12T05:41:28,729 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/13980602b6ed42439667eb86b1e1c2eb to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/13980602b6ed42439667eb86b1e1c2eb 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d7b58396fd324020907c1572a0749cfd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/d7b58396fd324020907c1572a0749cfd 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/00b9d7aba66049e7874dcdb402d4dcc9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/00b9d7aba66049e7874dcdb402d4dcc9 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/78cb4438e6534888b31a712e31db8ff0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/78cb4438e6534888b31a712e31db8ff0 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5aff2499b3ea472cb0fb8e9ea19e29d8 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/5aff2499b3ea472cb0fb8e9ea19e29d8 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2c516aa4b48c4169a12a0b5d5c486951 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2c516aa4b48c4169a12a0b5d5c486951 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ea6023daaa1b4941bff43f1895c550b1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ea6023daaa1b4941bff43f1895c550b1 2024-12-12T05:41:28,730 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cbd9776c957c4e9b8d1f703ec2e69e5c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cbd9776c957c4e9b8d1f703ec2e69e5c 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3c9b70c63a0a4fb181a5a915d8a540dc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3c9b70c63a0a4fb181a5a915d8a540dc 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3f47bbaff6ef442e86526a02949d0a5e to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3f47bbaff6ef442e86526a02949d0a5e 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2445cb3b003d4389a78fdcc2ade47f5b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/2445cb3b003d4389a78fdcc2ade47f5b 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ac5f6566927942698b3d51db2b52029b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/ac5f6566927942698b3d51db2b52029b 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/f210f605aa544bc9815d2afccae068b2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/f210f605aa544bc9815d2afccae068b2 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/a13840853f9a418b94ffbad2e9e0c95d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/a13840853f9a418b94ffbad2e9e0c95d 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0a0fc3ec788243ebb8637116983dc1b1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0a0fc3ec788243ebb8637116983dc1b1 2024-12-12T05:41:28,731 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0b744c1f14ad40e0a0d40bbae306d95a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/0b744c1f14ad40e0a0d40bbae306d95a 2024-12-12T05:41:28,732 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/1fb7eaa286424546b6076ec5a16ec77c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/1fb7eaa286424546b6076ec5a16ec77c 2024-12-12T05:41:28,734 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/recovered.edits/381.seqid, newMaxSeqId=381, maxSeqId=1 2024-12-12T05:41:28,735 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728. 
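The HFileArchiver records above all follow one pattern: a store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the same relative path under .../archive/data/default/.... The sketch below only illustrates that path mirroring for the default namespace, using a hypothetical helper (toArchivePath) and plain string handling; it is not HBase's internal HFileArchiver code.

    // Hypothetical helper illustrating the data -> archive path mirroring seen in the
    // HFileArchiver records above; this is not HBase's own implementation.
    public final class ArchivePathSketch {
      static String toArchivePath(String storeFilePath) {
        // Store files in this test live under <root>/data/default/<table>/<region>/<cf>/<hfile>.
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
          throw new IllegalArgumentException("not a store file path: " + storeFilePath);
        }
        // The archive keeps the identical relative layout under <root>/archive/.
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
      }

      public static void main(String[] args) {
        String src = "hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d"
            + "/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/acaf7a38405743c9b6b634b62e658555";
        // Prints the matching .../archive/data/default/... path, as in the log records above.
        System.out.println(toArchivePath(src));
      }
    }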
2024-12-12T05:41:28,735 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1635): Region close journal for ec02d2a34ba48290fed943fe35718728: 2024-12-12T05:41:28,736 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(170): Closed ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:28,736 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=ec02d2a34ba48290fed943fe35718728, regionState=CLOSED 2024-12-12T05:41:28,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-12T05:41:28,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseRegionProcedure ec02d2a34ba48290fed943fe35718728, server=83e80bf221ca,46457,1733981928566 in 1.4480 sec 2024-12-12T05:41:28,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-12T05:41:28,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ec02d2a34ba48290fed943fe35718728, UNASSIGN in 1.4520 sec 2024-12-12T05:41:28,740 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-12T05:41:28,740 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4540 sec 2024-12-12T05:41:28,740 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982088740"}]},"ts":"1733982088740"} 2024-12-12T05:41:28,741 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T05:41:28,791 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T05:41:28,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5290 sec 2024-12-12T05:41:29,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T05:41:29,373 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-12T05:41:29,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T05:41:29,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,379 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,380 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=144, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,380 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T05:41:29,384 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:29,387 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/recovered.edits] 2024-12-12T05:41:29,389 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/60b7d9489228422aa1a1e59a11780d94 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/60b7d9489228422aa1a1e59a11780d94 2024-12-12T05:41:29,389 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/e9eca341220042caa0f9533918497f36 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/e9eca341220042caa0f9533918497f36 2024-12-12T05:41:29,389 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/a153c9728b72462e985c612c426554df to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/A/a153c9728b72462e985c612c426554df 2024-12-12T05:41:29,391 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/649075113f134e02840c07f72ded8dc2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/649075113f134e02840c07f72ded8dc2 2024-12-12T05:41:29,391 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/87ff110865ef47ecbba640c386473675 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/87ff110865ef47ecbba640c386473675 2024-12-12T05:41:29,391 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/a2f2e8fc1d504ac0bda4b826d5f24c2a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/B/a2f2e8fc1d504ac0bda4b826d5f24c2a 2024-12-12T05:41:29,393 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/958ef9c7a753403eb48fa65706bce4d4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/958ef9c7a753403eb48fa65706bce4d4 2024-12-12T05:41:29,393 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cfb8ad83cb1847689f55da73f41f6b02 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/cfb8ad83cb1847689f55da73f41f6b02 2024-12-12T05:41:29,393 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3d3e11ef742e420591c79cf870a1c0f0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/C/3d3e11ef742e420591c79cf870a1c0f0 2024-12-12T05:41:29,395 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/recovered.edits/381.seqid to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728/recovered.edits/381.seqid 2024-12-12T05:41:29,395 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/ec02d2a34ba48290fed943fe35718728 2024-12-12T05:41:29,395 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T05:41:29,397 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=144, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,398 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T05:41:29,399 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-12T05:41:29,400 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=144, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,400 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T05:41:29,400 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733982089400"}]},"ts":"9223372036854775807"} 2024-12-12T05:41:29,402 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T05:41:29,402 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ec02d2a34ba48290fed943fe35718728, NAME => 'TestAcidGuarantees,,1733982056945.ec02d2a34ba48290fed943fe35718728.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T05:41:29,402 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T05:41:29,402 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733982089402"}]},"ts":"9223372036854775807"} 2024-12-12T05:41:29,403 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T05:41:29,442 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=144, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 67 msec 2024-12-12T05:41:29,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T05:41:29,482 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-12T05:41:29,499 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=245 (was 245), OpenFileDescriptor=448 (was 446) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 259) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=13235 (was 13255) 2024-12-12T05:41:29,506 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=245, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=287, ProcessCount=11, AvailableMemoryMB=13234 2024-12-12T05:41:29,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
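The DISABLE (procId 140) and DELETE (procId 144) operations recorded above are driven through HBaseAdmin by the test harness. A minimal sketch of the equivalent client-side calls, assuming the HBase 2.x Admin API and a default client configuration; this is not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();        // picks up hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // DisableTableProcedure, as with pid=140 above
            }
            admin.deleteTable(table);      // DeleteTableProcedure, as with pid=144 above
          }
        }
      }
    }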
2024-12-12T05:41:29,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:41:29,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:29,509 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T05:41:29,509 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:29,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 145 2024-12-12T05:41:29,510 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T05:41:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T05:41:29,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742331_1507 (size=963) 2024-12-12T05:41:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T05:41:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T05:41:29,920 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d 2024-12-12T05:41:29,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742332_1508 (size=53) 2024-12-12T05:41:30,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T05:41:30,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:41:30,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7af3549cbdbd66c1f5a0c758d39edf04, disabling compactions & flushes 2024-12-12T05:41:30,346 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:30,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:30,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. after waiting 0 ms 2024-12-12T05:41:30,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:30,347 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
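The CREATE request above (procId 145) carries the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three identically configured families A, B and C with a single version each. A minimal sketch of building such a descriptor with the HBase 2.x TableDescriptorBuilder API; other attributes from the request (block size, bloom filter, and the small memstore flush size behind the TableDescriptorChecker warning) are left at their defaults here.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table attribute seen in the create request above; selects the ADAPTIVE CompactingMemStore.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)            // VERSIONS => '1' in the descriptor above
              .build());
        }
        TableDescriptor td = tdb.build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);            // CreateTableProcedure, as with pid=145 above
        }
      }
    }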
2024-12-12T05:41:30,347 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:30,349 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T05:41:30,350 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733982090349"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733982090349"}]},"ts":"1733982090349"} 2024-12-12T05:41:30,351 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T05:41:30,352 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T05:41:30,353 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982090352"}]},"ts":"1733982090352"} 2024-12-12T05:41:30,354 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T05:41:30,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, ASSIGN}] 2024-12-12T05:41:30,421 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, ASSIGN 2024-12-12T05:41:30,422 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, ASSIGN; state=OFFLINE, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=false 2024-12-12T05:41:30,573 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:30,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; OpenRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:41:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T05:41:30,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:30,736 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:30,736 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7285): Opening region: {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:41:30,737 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,737 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:41:30,737 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7327): checking encryption for 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,737 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7330): checking classloading for 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,739 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,740 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:41:30,740 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af3549cbdbd66c1f5a0c758d39edf04 columnFamilyName A 2024-12-12T05:41:30,741 DEBUG [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:30,741 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(327): Store=7af3549cbdbd66c1f5a0c758d39edf04/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:41:30,741 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,743 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:41:30,743 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af3549cbdbd66c1f5a0c758d39edf04 columnFamilyName B 2024-12-12T05:41:30,743 DEBUG [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:30,743 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(327): Store=7af3549cbdbd66c1f5a0c758d39edf04/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:41:30,743 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,744 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:41:30,744 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af3549cbdbd66c1f5a0c758d39edf04 columnFamilyName C 2024-12-12T05:41:30,744 DEBUG [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:30,745 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(327): Store=7af3549cbdbd66c1f5a0c758d39edf04/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:41:30,745 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:30,745 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,746 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,747 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:41:30,748 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1085): writing seq id for 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:30,749 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T05:41:30,749 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1102): Opened 7af3549cbdbd66c1f5a0c758d39edf04; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65670059, jitterRate=-0.021439865231513977}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:41:30,750 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1001): Region open journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:30,750 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., pid=147, masterSystemTime=1733982090729 2024-12-12T05:41:30,751 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:30,751 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
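On open, each of the three stores above is backed by a CompactingMemStore with compactor=ADAPTIVE, which follows from the table attribute set at creation time. A small verification sketch, assuming the HBase 2.x client API, that simply reads that attribute back from the live table descriptor:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class CheckMemstoreTypeSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"));
          // Expected to print ADAPTIVE for the table created above.
          System.out.println(td.getValue("hbase.hregion.compacting.memstore.type"));
        }
      }
    }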
2024-12-12T05:41:30,752 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=OPEN, openSeqNum=2, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:30,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-12T05:41:30,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; OpenRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 in 176 msec 2024-12-12T05:41:30,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-12T05:41:30,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, ASSIGN in 335 msec 2024-12-12T05:41:30,754 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T05:41:30,755 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982090754"}]},"ts":"1733982090754"} 2024-12-12T05:41:30,755 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T05:41:30,817 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T05:41:30,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3100 sec 2024-12-12T05:41:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T05:41:31,620 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-12-12T05:41:31,622 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-12-12T05:41:31,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:31,691 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:31,692 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:31,693 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T05:41:31,694 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56988, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T05:41:31,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T05:41:31,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T05:41:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:31,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742333_1509 (size=999) 2024-12-12T05:41:32,110 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T05:41:32,110 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T05:41:32,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:41:32,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, REOPEN/MOVE}] 2024-12-12T05:41:32,121 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, REOPEN/MOVE 2024-12-12T05:41:32,122 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,123 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:41:32,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:41:32,274 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,274 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,274 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:41:32,274 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing 7af3549cbdbd66c1f5a0c758d39edf04, disabling compactions & flushes 2024-12-12T05:41:32,274 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,274 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,274 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. after waiting 0 ms 2024-12-12T05:41:32,274 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
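The modify request above (procId 148) changes only family A, switching it to MOB with a 4-byte threshold (IS_MOB => 'true', MOB_THRESHOLD => '4'); that is what triggers the ReopenTableRegionsProcedure and the REOPEN/MOVE of the region. A minimal sketch of the equivalent change through the HBase 2.x Admin API, copying the current descriptor and rebuilding family A; this is not the test's own code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyASketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(table);
          ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
              .setMobEnabled(true)    // IS_MOB => 'true'
              .setMobThreshold(4L)    // MOB_THRESHOLD => '4' (bytes)
              .build();
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build();
          admin.modifyTable(modified); // ModifyTableProcedure; the region is reopened as logged above
        }
      }
    }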
2024-12-12T05:41:32,278 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T05:41:32,279 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,279 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:32,279 WARN [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionServer(3786): Not adding moved region record: 7af3549cbdbd66c1f5a0c758d39edf04 to self. 2024-12-12T05:41:32,280 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,281 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=CLOSED 2024-12-12T05:41:32,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-12T05:41:32,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 in 159 msec 2024-12-12T05:41:32,284 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, REOPEN/MOVE; state=CLOSED, location=83e80bf221ca,46457,1733981928566; forceNewPlan=false, retain=true 2024-12-12T05:41:32,434 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=OPENING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE; OpenRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:41:32,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,595 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
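The entries above show the region being closed (pid=151), marked CLOSED in hbase:meta, and scheduled to reopen on the same server as part of the ReopenTableRegionsProcedure. As a small illustrative sketch, a client that triggered such a modification could poll the Admin API until the table's regions are back online before resuming writes; the helper name, polling interval, and timeout below are arbitrary choices for the example, not part of the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForReopen {                        // hypothetical helper
  static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // isTableAvailable returns true once every region of the table is open.
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException(table + " did not come back online in time");
      }
      Thread.sleep(200);                                  // simple fixed poll; a real client might back off
    }
  }
}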
2024-12-12T05:41:32,595 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7285): Opening region: {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} 2024-12-12T05:41:32,596 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,596 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T05:41:32,596 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7327): checking encryption for 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,596 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7330): checking classloading for 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,598 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,599 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:41:32,600 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af3549cbdbd66c1f5a0c758d39edf04 columnFamilyName A 2024-12-12T05:41:32,602 DEBUG [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:32,603 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(327): Store=7af3549cbdbd66c1f5a0c758d39edf04/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:41:32,603 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,604 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:41:32,604 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af3549cbdbd66c1f5a0c758d39edf04 columnFamilyName B 2024-12-12T05:41:32,604 DEBUG [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:32,605 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(327): Store=7af3549cbdbd66c1f5a0c758d39edf04/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:41:32,605 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,605 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T05:41:32,606 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af3549cbdbd66c1f5a0c758d39edf04 columnFamilyName C 2024-12-12T05:41:32,606 DEBUG [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:32,606 INFO [StoreOpener-7af3549cbdbd66c1f5a0c758d39edf04-1 {}] regionserver.HStore(327): Store=7af3549cbdbd66c1f5a0c758d39edf04/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T05:41:32,606 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,607 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,608 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,610 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T05:41:32,612 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1085): writing seq id for 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,613 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1102): Opened 7af3549cbdbd66c1f5a0c758d39edf04; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72857604, jitterRate=0.08566290140151978}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T05:41:32,614 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1001): Region open journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:32,614 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., pid=152, masterSystemTime=1733982092590 2024-12-12T05:41:32,616 DEBUG [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,616 INFO [RS_OPEN_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
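The store-open entries above reflect attributes carried on the table descriptor: the ADAPTIVE in-memory compaction metadata, the DEFAULT store file tracker, and the deliberately small MEMSTORE_FLUSHSIZE (131072 bytes) that the TableDescriptorChecker warned about earlier. The sketch below shows, assuming the HBase 2.x client API, how a descriptor carrying those values could be built; it is illustrative only (the class name is invented) and is not the test's actual setup code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class TestTableDescriptorSketch {            // hypothetical class name
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata seen in the modify-table entry and echoed at store open.
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        // 131072 bytes (128 KB) is the intentionally tiny flush size the checker flagged.
        .setMemStoreFlushSize(131072L)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("B")))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("C")))
        .build();
  }
}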
2024-12-12T05:41:32,616 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=OPEN, openSeqNum=5, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=150 2024-12-12T05:41:32,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=150, state=SUCCESS; OpenRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 in 180 msec 2024-12-12T05:41:32,619 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-12T05:41:32,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, REOPEN/MOVE in 498 msec 2024-12-12T05:41:32,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-12T05:41:32,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 505 msec 2024-12-12T05:41:32,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 925 msec 2024-12-12T05:41:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-12T05:41:32,625 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x091d72db to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58971172 2024-12-12T05:41:32,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e757135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-12-12T05:41:32,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,687 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-12-12T05:41:32,700 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,700 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x6bb6288a to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-12-12T05:41:32,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,710 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-12-12T05:41:32,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,719 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d832d43 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1d3a95 2024-12-12T05:41:32,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50bf224f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,727 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15b6349f to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503a7d2e 2024-12-12T05:41:32,735 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79be903c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x439b60d5 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@404bb685 2024-12-12T05:41:32,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d79f1c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,744 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f48b1c2 to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42aacb30 2024-12-12T05:41:32,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40dfd554, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,752 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7287c75d to 127.0.0.1:60303 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e06176 2024-12-12T05:41:32,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@582b6d8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T05:41:32,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:32,764 DEBUG [hconnection-0x20864317-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,765 DEBUG [hconnection-0x3c597009-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,765 DEBUG [hconnection-0x6caba83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-12T05:41:32,767 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51112, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,767 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,767 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,767 DEBUG [hconnection-0x62c33a55-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,768 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:32,769 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,769 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:32,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:32,772 DEBUG [hconnection-0x3847260e-metaLookup-shared--pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,772 DEBUG [hconnection-0x21d781b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,772 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,773 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,773 DEBUG [hconnection-0x13d881e9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:32,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:41:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:32,774 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:32,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:32,779 DEBUG [hconnection-0x325c2fc2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,780 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,780 DEBUG [hconnection-0x79f0ebf7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,781 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51164, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,783 DEBUG [hconnection-0x1ed50662-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T05:41:32,784 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T05:41:32,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is 
too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982152786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982152787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982152788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982152788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982152788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121243fdd95e5acc4521883481810741b802_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982092771/Put/seqid=0 2024-12-12T05:41:32,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742334_1510 (size=12154) 2024-12-12T05:41:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:32,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982152888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982152890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982152890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982152890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:32,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982152891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:32,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:32,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:32,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:33,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982153092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982153092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982153092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982153093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982153093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,205 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:33,208 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121243fdd95e5acc4521883481810741b802_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121243fdd95e5acc4521883481810741b802_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:33,209 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/15abe53eddbb43ce8c19cda0eb3cc0cc, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:33,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/15abe53eddbb43ce8c19cda0eb3cc0cc is 175, key is test_row_0/A:col10/1733982092771/Put/seqid=0 2024-12-12T05:41:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742335_1511 (size=30955) 2024-12-12T05:41:33,220 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 
K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/15abe53eddbb43ce8c19cda0eb3cc0cc 2024-12-12T05:41:33,225 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:33,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/6861d002ced84ab2b7b4061d0bb0c9d4 is 50, key is test_row_0/B:col10/1733982092771/Put/seqid=0 2024-12-12T05:41:33,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742336_1512 (size=12001) 2024-12-12T05:41:33,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:33,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982153395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982153396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982153396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982153396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982153397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,530 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:33,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/6861d002ced84ab2b7b4061d0bb0c9d4 2024-12-12T05:41:33,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1a6c34fc98c84c729dbaf07cf3cfb4ac is 50, key is test_row_0/C:col10/1733982092771/Put/seqid=0 2024-12-12T05:41:33,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742337_1513 (size=12001) 2024-12-12T05:41:33,683 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,684 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:33,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:33,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982153897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982153899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982153900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982153901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:33,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982153901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:33,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-12T05:41:33,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
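The RegionTooBusyException storm above comes from HRegion.checkResources: once the region's memstore passes its blocking limit (the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, here 512.0 K, presumably because the test runs with a deliberately small flush size), every incoming Mutate is rejected until the in-flight flush frees memory. The stock client normally retries this exception on its own; the sketch below is only a hedged illustration of that retry pattern for a single Put against the table named in the log, and catching RegionTooBusyException directly is an assumption, since in practice it can surface wrapped in a retries-exhausted exception.

// Hypothetical client-side sketch: retry a Put while the region reports
// RegionTooBusyException (memstore over its blocking limit). Table, row and
// family names are taken from the log; the backoff values are invented.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted by the region
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                // region is flushing; back off and retry
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}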
2024-12-12T05:41:33,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:33,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:33,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:33,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
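pid=154 is a FlushRegionProcedure dispatched by the master as a child of the FlushTableProcedure pid=153 that the handler above keeps polling ("Checking to see if procedure is done pid=153"). Because the region is already being flushed by MemStoreFlusher, the callable fails fast with "Unable to complete flush", the master logs "Remote procedure failed" and re-dispatches it; once the region-server flush drains, pid=154 and then pid=153 finish successfully later in the log. The table-level flush is presumably driven by the test through Admin.flush or an equivalent admin call; a minimal sketch of that call, assuming a running cluster and the table name from the log:

// Hypothetical sketch of the client call that drives the master-side
// FlushTableProcedure seen here (pid=153 fanning out to per-region pid=154).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master sends
      // one remote FlushRegionCallable per region and keeps retrying any
      // region that reports "already flushing", as the log shows for pid=154.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}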
2024-12-12T05:41:33,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
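The flushes that follow show why column family A behaves differently from B and C: A is written through the MOB path (DefaultMobStoreFlusher writing under /mobdir and HMobStore renaming the flushed mob file into place), so the A store file carries MOB reference cells (30.2 K, biggest cell 175 bytes) while B and C flush plain 11.7 K HFiles with 50-byte cells. A minimal sketch of a table descriptor with a MOB-enabled family A follows; the threshold value is an assumption for illustration, not the test's actual configuration.

// Hypothetical table descriptor matching the MOB activity in the log:
// family A is MOB-enabled, B and C are ordinary families.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyDescriptorExample {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)   // values above the threshold are stored as MOB files under /mobdir
        .setMobThreshold(4L)   // assumed threshold in bytes; the test's real value may differ
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
        .build();
  }
}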
2024-12-12T05:41:34,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1a6c34fc98c84c729dbaf07cf3cfb4ac 2024-12-12T05:41:34,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/15abe53eddbb43ce8c19cda0eb3cc0cc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc 2024-12-12T05:41:34,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc, entries=150, sequenceid=16, filesize=30.2 K 2024-12-12T05:41:34,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/6861d002ced84ab2b7b4061d0bb0c9d4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/6861d002ced84ab2b7b4061d0bb0c9d4 2024-12-12T05:41:34,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/6861d002ced84ab2b7b4061d0bb0c9d4, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:41:34,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1a6c34fc98c84c729dbaf07cf3cfb4ac as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1a6c34fc98c84c729dbaf07cf3cfb4ac 2024-12-12T05:41:34,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1a6c34fc98c84c729dbaf07cf3cfb4ac, entries=150, sequenceid=16, filesize=11.7 K 2024-12-12T05:41:34,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1309ms, sequenceid=16, compaction requested=false 2024-12-12T05:41:34,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:34,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:34,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=154 2024-12-12T05:41:34,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:34,140 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T05:41:34,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:34,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:34,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:34,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:34,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:34,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:34,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120aef8e239ef74ff68f63ddd9c6c9a3b1_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982092787/Put/seqid=0 2024-12-12T05:41:34,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742338_1514 (size=12154) 2024-12-12T05:41:34,250 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T05:41:34,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:34,564 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120aef8e239ef74ff68f63ddd9c6c9a3b1_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120aef8e239ef74ff68f63ddd9c6c9a3b1_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:34,566 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/a78dc5f406904d5a98fe68565e3b7d3d, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:34,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/a78dc5f406904d5a98fe68565e3b7d3d is 175, key is test_row_0/A:col10/1733982092787/Put/seqid=0 2024-12-12T05:41:34,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742339_1515 (size=30955) 2024-12-12T05:41:34,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:34,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:34,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:34,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:34,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982154910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:34,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:34,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982154910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:34,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:34,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982154911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:34,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:34,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982154911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:34,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:34,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982154912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:34,972 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/a78dc5f406904d5a98fe68565e3b7d3d 2024-12-12T05:41:34,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/23ddf3c777204949b7ddd20550e32590 is 50, key is test_row_0/B:col10/1733982092787/Put/seqid=0 2024-12-12T05:41:34,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742340_1516 (size=12001) 2024-12-12T05:41:35,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982155012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982155014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982155015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982155215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982155216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982155216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,382 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/23ddf3c777204949b7ddd20550e32590 2024-12-12T05:41:35,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8b881784030f4c93ae4d1ebf9929c45d is 50, key is test_row_0/C:col10/1733982092787/Put/seqid=0 2024-12-12T05:41:35,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742341_1517 (size=12001) 2024-12-12T05:41:35,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982155519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982155519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:35,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982155520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:35,798 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8b881784030f4c93ae4d1ebf9929c45d 2024-12-12T05:41:35,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/a78dc5f406904d5a98fe68565e3b7d3d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d 2024-12-12T05:41:35,804 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d, entries=150, sequenceid=42, filesize=30.2 K 2024-12-12T05:41:35,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/23ddf3c777204949b7ddd20550e32590 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/23ddf3c777204949b7ddd20550e32590 2024-12-12T05:41:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,807 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/23ddf3c777204949b7ddd20550e32590, entries=150, sequenceid=42, filesize=11.7 K
2024-12-12T05:41:35,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8b881784030f4c93ae4d1ebf9929c45d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8b881784030f4c93ae4d1ebf9929c45d
2024-12-12T05:41:35,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,811 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8b881784030f4c93ae4d1ebf9929c45d, entries=150, sequenceid=42, filesize=11.7 K
2024-12-12T05:41:35,812 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1672ms, sequenceid=42, compaction requested=false
2024-12-12T05:41:35,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04:
2024-12-12T05:41:35,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.
2024-12-12T05:41:35,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154
2024-12-12T05:41:35,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=154
2024-12-12T05:41:35,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153
2024-12-12T05:41:35,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0440 sec
2024-12-12T05:41:35,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 3.0500 sec
2024-12-12T05:41:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
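The entries above record a full flush of TestAcidGuarantees completing: the B and C store files are committed and added at sequenceid=42, HRegion finishes the flush after 1672 ms, and the master then marks FlushRegionProcedure pid=154 and its parent FlushTableProcedure pid=153 as SUCCESS. The surrounding storefiletracker.StoreFileTrackerFactory DEBUG entries show each store resolving its tracker to DefaultStoreFileTracker, the out-of-the-box choice (selected via the hbase.store.file-tracker.impl setting in recent releases; stated here as an assumption about the version in use). As a point of reference only, a table flush like the one driven by these procedures could be requested from a client roughly as in the sketch below; the table name is taken from the log, while the connection setup is illustrative and not part of the test output.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath; the target cluster address is
    // whatever that file points at (not taken from this log).
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the cluster to flush every region of the table. In the release
      // used by this test run such a request surfaces on the master as a
      // FlushTableProcedure (pid=153 above) with one FlushRegionProcedure
      // per region (pid=154 above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
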
2024-12-12T05:41:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T05:41:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
... (the same StoreFileTrackerFactory(122) DEBUG record repeats on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2, port 46457, from 2024-12-12T05:41:35,973 through 2024-12-12T05:41:36,025; duplicate records omitted) ...
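The records in this stretch of the test log are the per-store StoreFileTrackerFactory lookups condensed above, followed by the memstore flush on region 7af3549cbdbd66c1f5a0c758d39edf04. As a point of reference only, the sketch below shows how a store file tracker implementation is typically pinned through configuration when a table is created; it is an illustration, not code from this test run. The property name hbase.store.file-tracker.impl and the DEFAULT tracker alias are assumptions based on HBase's store file tracking feature, and the table name test_table and column family info are hypothetical.

    // Illustrative sketch only -- not taken from this test run. Assumes the
    // "hbase.store.file-tracker.impl" property and the "DEFAULT" tracker alias
    // from HBase's store file tracking feature; the table and family names
    // below are hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerConfigSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Cluster-wide default; "DEFAULT" is the alias that resolves to
        // DefaultStoreFileTracker, the impl named throughout the log above.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The same property can also be set per table on its descriptor,
          // which the factory consults when a store resolves its tracker.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("test_table"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .build());
        }
      }
    }

The heavy repetition in the log appears to come from the factory emitting this DEBUG line on every tracker lookup, once per RPC handler call, which is why the same message shows up back to back from handlers 0, 1 and 2.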
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:36,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:36,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:36,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:36,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212129b817b1490423dbba46584ce223a83_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:36,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,033 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742342_1518 (size=14594) 2024-12-12T05:41:36,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982156047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982156048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982156049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982156150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982156150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982156151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982156353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982156353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982156353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,435 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:36,439 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212129b817b1490423dbba46584ce223a83_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212129b817b1490423dbba46584ce223a83_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:36,439 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/44378ac5375943f68198c207b135ecfd, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:36,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/44378ac5375943f68198c207b135ecfd is 175, key is test_row_0/A:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:36,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742343_1519 (size=39549) 2024-12-12T05:41:36,443 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 
K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/44378ac5375943f68198c207b135ecfd 2024-12-12T05:41:36,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/f9c9df745e2b4ce895b6d290bf74dc6c is 50, key is test_row_0/B:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742344_1520 (size=12001) 2024-12-12T05:41:36,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982156656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982156656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982156657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T05:41:36,880 INFO [Thread-2284 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-12T05:41:36,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-12T05:41:36,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:36,882 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:36,882 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:36,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/f9c9df745e2b4ce895b6d290bf74dc6c 2024-12-12T05:41:36,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:36,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/d3c5d9c69fc64ed3960b8981557eedb7 is 50, key is test_row_0/C:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:36,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742345_1521 (size=12001) 
2024-12-12T05:41:36,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982156924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,925 DEBUG [Thread-2280 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:36,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:36,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982156927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:36,929 DEBUG [Thread-2278 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see 
https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:37,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:37,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-12T05:41:37,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:37,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:37,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:37,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:37,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:37,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:37,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982157160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:37,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:37,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982157161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:37,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982157161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:37,185 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:37,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-12T05:41:37,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:37,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:37,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:37,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:37,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:37,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/d3c5d9c69fc64ed3960b8981557eedb7 2024-12-12T05:41:37,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/44378ac5375943f68198c207b135ecfd as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd 2024-12-12T05:41:37,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd, entries=200, sequenceid=55, filesize=38.6 K 2024-12-12T05:41:37,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/f9c9df745e2b4ce895b6d290bf74dc6c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/f9c9df745e2b4ce895b6d290bf74dc6c 2024-12-12T05:41:37,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/f9c9df745e2b4ce895b6d290bf74dc6c, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T05:41:37,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/d3c5d9c69fc64ed3960b8981557eedb7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d3c5d9c69fc64ed3960b8981557eedb7 2024-12-12T05:41:37,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d3c5d9c69fc64ed3960b8981557eedb7, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T05:41:37,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1277ms, sequenceid=55, compaction requested=true 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:37,302 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:37,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:37,302 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:37,303 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:37,303 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/A is initiating minor compaction (all files) 2024-12-12T05:41:37,303 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/A in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:37,303 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=99.1 K 2024-12-12T05:41:37,303 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:37,303 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd] 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15abe53eddbb43ce8c19cda0eb3cc0cc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733982092771 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/B is initiating minor compaction (all files) 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting a78dc5f406904d5a98fe68565e3b7d3d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733982092780 2024-12-12T05:41:37,304 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/B in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:37,304 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/6861d002ced84ab2b7b4061d0bb0c9d4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/23ddf3c777204949b7ddd20550e32590, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/f9c9df745e2b4ce895b6d290bf74dc6c] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=35.2 K 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44378ac5375943f68198c207b135ecfd, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982094910 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 6861d002ced84ab2b7b4061d0bb0c9d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733982092771 2024-12-12T05:41:37,304 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 23ddf3c777204949b7ddd20550e32590, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733982092780 2024-12-12T05:41:37,305 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f9c9df745e2b4ce895b6d290bf74dc6c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982094911 2024-12-12T05:41:37,308 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:37,309 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412129b205fd75bf844a7b54618b37d8a5c18_7af3549cbdbd66c1f5a0c758d39edf04 store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:37,310 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412129b205fd75bf844a7b54618b37d8a5c18_7af3549cbdbd66c1f5a0c758d39edf04, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:37,311 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129b205fd75bf844a7b54618b37d8a5c18_7af3549cbdbd66c1f5a0c758d39edf04 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:37,311 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#B#compaction#442 average throughput is 3.28 
MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:37,311 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1 is 50, key is test_row_0/B:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:37,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742346_1522 (size=4469) 2024-12-12T05:41:37,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742347_1523 (size=12104) 2024-12-12T05:41:37,314 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#A#compaction#441 average throughput is 4.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:37,315 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/febbab45559142c6b3eee959009e0639 is 175, key is test_row_0/A:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742348_1524 (size=31058) 2024-12-12T05:41:37,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:37,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:37,338 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:37,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212161108dd1aaf4a92bbd14e9761eca64c_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982096047/Put/seqid=0 2024-12-12T05:41:37,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742349_1525 (size=12154) 2024-12-12T05:41:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:37,718 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1 2024-12-12T05:41:37,720 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/febbab45559142c6b3eee959009e0639 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/febbab45559142c6b3eee959009e0639 2024-12-12T05:41:37,721 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/B of 7af3549cbdbd66c1f5a0c758d39edf04 into 
c7c4e71a5f1c4f8a9faa9dd0816c1fb1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:37,721 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:37,721 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/B, priority=13, startTime=1733982097302; duration=0sec 2024-12-12T05:41:37,721 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:37,721 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:B 2024-12-12T05:41:37,721 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:37,722 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:37,722 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/C is initiating minor compaction (all files) 2024-12-12T05:41:37,722 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/C in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:37,722 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1a6c34fc98c84c729dbaf07cf3cfb4ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8b881784030f4c93ae4d1ebf9929c45d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d3c5d9c69fc64ed3960b8981557eedb7] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=35.2 K 2024-12-12T05:41:37,722 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a6c34fc98c84c729dbaf07cf3cfb4ac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733982092771 2024-12-12T05:41:37,722 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b881784030f4c93ae4d1ebf9929c45d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733982092780 2024-12-12T05:41:37,723 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting d3c5d9c69fc64ed3960b8981557eedb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982094911 2024-12-12T05:41:37,723 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/A of 7af3549cbdbd66c1f5a0c758d39edf04 into febbab45559142c6b3eee959009e0639(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:37,723 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:37,723 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/A, priority=13, startTime=1733982097302; duration=0sec 2024-12-12T05:41:37,723 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:37,723 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:A 2024-12-12T05:41:37,728 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#C#compaction#444 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:37,728 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/652e5b4d235b44a3802b8fc1b94e3107 is 50, key is test_row_0/C:col10/1733982096025/Put/seqid=0 2024-12-12T05:41:37,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742350_1526 (size=12104) 2024-12-12T05:41:37,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:37,749 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212161108dd1aaf4a92bbd14e9761eca64c_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212161108dd1aaf4a92bbd14e9761eca64c_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:37,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/2dbc79757b434da89bb38280fb24633f, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:37,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/2dbc79757b434da89bb38280fb24633f is 175, key is test_row_0/A:col10/1733982096047/Put/seqid=0 2024-12-12T05:41:37,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742351_1527 (size=30955) 2024-12-12T05:41:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:38,144 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/652e5b4d235b44a3802b8fc1b94e3107 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/652e5b4d235b44a3802b8fc1b94e3107 2024-12-12T05:41:38,147 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/C of 7af3549cbdbd66c1f5a0c758d39edf04 into 652e5b4d235b44a3802b8fc1b94e3107(size=11.8 K), total size for store is 
11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:38,147 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:38,147 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/C, priority=13, startTime=1733982097302; duration=0sec 2024-12-12T05:41:38,147 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:38,147 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:C 2024-12-12T05:41:38,153 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/2dbc79757b434da89bb38280fb24633f 2024-12-12T05:41:38,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/45027ce135954d03a866f9c57b2f6b16 is 50, key is test_row_0/B:col10/1733982096047/Put/seqid=0 2024-12-12T05:41:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:38,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:38,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742352_1528 (size=12001) 2024-12-12T05:41:38,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982158173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982158174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982158175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982158275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982158276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982158277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982158477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982158479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982158479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,566 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/45027ce135954d03a866f9c57b2f6b16 2024-12-12T05:41:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8047f779c8734d41bd328835cdaf6b32 is 50, key is test_row_0/C:col10/1733982096047/Put/seqid=0 2024-12-12T05:41:38,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742353_1529 (size=12001) 2024-12-12T05:41:38,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982158780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982158781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:38,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982158782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:38,974 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8047f779c8734d41bd328835cdaf6b32 2024-12-12T05:41:38,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/2dbc79757b434da89bb38280fb24633f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f 2024-12-12T05:41:38,980 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f, entries=150, sequenceid=78, filesize=30.2 K 2024-12-12T05:41:38,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/45027ce135954d03a866f9c57b2f6b16 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/45027ce135954d03a866f9c57b2f6b16 2024-12-12T05:41:38,982 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/45027ce135954d03a866f9c57b2f6b16, entries=150, sequenceid=78, filesize=11.7 K 2024-12-12T05:41:38,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8047f779c8734d41bd328835cdaf6b32 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8047f779c8734d41bd328835cdaf6b32 2024-12-12T05:41:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:38,985 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8047f779c8734d41bd328835cdaf6b32, entries=150, sequenceid=78, filesize=11.7 K 2024-12-12T05:41:38,986 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1648ms, sequenceid=78, compaction requested=false 2024-12-12T05:41:38,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:38,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
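The run of WARN/DEBUG pairs above is the server-side symptom of write pressure: HRegion.checkResources() rejects each incoming Mutate with RegionTooBusyException while the region's memstore sits above its 512.0 K blocking limit, and the rejections stop only after the pid=156 flush commits the A, B and C files at sequenceid=78 ("Finished flush of dataSize ~134.18 KB ... in 1648ms" above). The exception is retryable, and the stock HBase client normally retries it internally; the sketch below is not part of this test — it just makes that retry loop explicit for a single put. Table name, row key and column family are taken from the log; the retry cap and backoff are illustrative assumptions.

```java
// Minimal client-side sketch (assumption: not the test's own writer code).
// It issues the same kind of Mutate the handlers above reject, and retries on
// RegionTooBusyException until the flush has drained the memstore.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);                  // may surface RegionTooBusyException when client retries are disabled
          break;                           // accepted once the memstore is back under the blocking limit
        } catch (RegionTooBusyException busy) {
          if (++attempts > 10) {           // illustrative retry cap, not an HBase default
            throw busy;
          }
          Thread.sleep(100L * attempts);   // simple linear backoff between attempts
        }
      }
    }
  }
}
```

The blocking limit itself is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a 512.0 K ceiling suggests the test configures a flush size far below the 128 MB production default so that this back-pressure path is exercised quickly.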
2024-12-12T05:41:38,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-12T05:41:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-12T05:41:38,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-12T05:41:38,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1040 sec 2024-12-12T05:41:38,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 2.1070 sec 2024-12-12T05:41:39,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:39,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T05:41:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:39,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:39,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:39,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212415a26455ee649fca220bfe2120a2b6f_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742354_1530 (size=12154) 2024-12-12T05:41:39,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982159330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982159330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982159330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982159433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982159433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982159433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982159636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982159636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982159636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,709 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:39,712 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212415a26455ee649fca220bfe2120a2b6f_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212415a26455ee649fca220bfe2120a2b6f_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:39,712 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/35f7ef3f5c5a4e30be05fa98e3d02d77, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:39,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/35f7ef3f5c5a4e30be05fa98e3d02d77 is 175, key is test_row_0/A:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:39,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742355_1531 (size=30955) 2024-12-12T05:41:39,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982159939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982159940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:39,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:39,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982159940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:40,116 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/35f7ef3f5c5a4e30be05fa98e3d02d77 2024-12-12T05:41:40,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/152412867bc24733ae3e68f8e846c1fa is 50, key is test_row_0/B:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:40,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742356_1532 (size=12001) 2024-12-12T05:41:40,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:40,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982160442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:40,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:40,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982160443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:40,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982160446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:40,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/152412867bc24733ae3e68f8e846c1fa 2024-12-12T05:41:40,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/e3331178e478466ab17a38baa58da6c1 is 50, key is test_row_0/C:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:40,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742357_1533 (size=12001) 2024-12-12T05:41:40,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/e3331178e478466ab17a38baa58da6c1 2024-12-12T05:41:40,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/35f7ef3f5c5a4e30be05fa98e3d02d77 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77 2024-12-12T05:41:40,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77, entries=150, sequenceid=96, filesize=30.2 K 2024-12-12T05:41:40,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/152412867bc24733ae3e68f8e846c1fa as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/152412867bc24733ae3e68f8e846c1fa 2024-12-12T05:41:40,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/152412867bc24733ae3e68f8e846c1fa, entries=150, sequenceid=96, filesize=11.7 K 2024-12-12T05:41:40,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/e3331178e478466ab17a38baa58da6c1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/e3331178e478466ab17a38baa58da6c1 2024-12-12T05:41:40,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/e3331178e478466ab17a38baa58da6c1, entries=150, sequenceid=96, filesize=11.7 K 2024-12-12T05:41:40,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1657ms, sequenceid=96, compaction requested=true 2024-12-12T05:41:40,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:40,944 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:40,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:40,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:40,944 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:40,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:40,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:40,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:40,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/B is initiating minor compaction (all files) 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/A is initiating minor compaction (all files) 2024-12-12T05:41:40,945 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/B in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:40,945 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/A in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:40,945 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/45027ce135954d03a866f9c57b2f6b16, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/152412867bc24733ae3e68f8e846c1fa] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=35.3 K 2024-12-12T05:41:40,945 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/febbab45559142c6b3eee959009e0639, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=90.8 K 2024-12-12T05:41:40,945 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
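The RegionTooBusyException entries above record Mutate RPCs being rejected while the region's memstore sits over its 512.0 K blocking limit; the server answers each put with that exception until the in-flight flush drains the memstore. The sketch below is a minimal, illustrative client-side writer that backs off and retries on such a failure. It is not taken from the test source: the class name BackoffPutExample, the cell value and the retry/backoff parameters are assumptions, while the table, row, family and qualifier names are the ones visible in the log. Note also that the stock HBase client already performs similar retries internally before surfacing an error.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // may fail while the memstore is over its blocking limit
          break;            // write accepted
        } catch (IOException e) {
          // RegionTooBusyException (possibly wrapped by the client's own retry
          // machinery) is the failure mode seen in the log above.
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff while the flush catches up
        }
      }
    }
  }
}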
2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/febbab45559142c6b3eee959009e0639, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77] 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting c7c4e71a5f1c4f8a9faa9dd0816c1fb1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982094911 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting febbab45559142c6b3eee959009e0639, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982094911 2024-12-12T05:41:40,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 45027ce135954d03a866f9c57b2f6b16, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733982096045 2024-12-12T05:41:40,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T05:41:40,945 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2dbc79757b434da89bb38280fb24633f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733982096045 2024-12-12T05:41:40,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:40,946 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 152412867bc24733ae3e68f8e846c1fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733982098173 2024-12-12T05:41:40,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:40,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:40,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:40,946 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35f7ef3f5c5a4e30be05fa98e3d02d77, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733982098173 2024-12-12T05:41:40,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:40,946 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:40,951 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:40,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ee990e0e4e9c4974ae9c8102fc405d48_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982100944/Put/seqid=0 2024-12-12T05:41:40,952 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#B#compaction#452 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:40,953 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/476ac6508ba64836ad1b746e8bae7b9a is 50, key is test_row_0/B:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:40,956 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412128af7af497b944515afd3a9fc355901bb_7af3549cbdbd66c1f5a0c758d39edf04 store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:40,957 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412128af7af497b944515afd3a9fc355901bb_7af3549cbdbd66c1f5a0c758d39edf04, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:40,957 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128af7af497b944515afd3a9fc355901bb_7af3549cbdbd66c1f5a0c758d39edf04 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:40,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:40,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982160961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:40,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:40,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982160963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:40,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742359_1535 (size=12154) 2024-12-12T05:41:40,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742358_1534 (size=12207) 2024-12-12T05:41:40,976 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/476ac6508ba64836ad1b746e8bae7b9a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/476ac6508ba64836ad1b746e8bae7b9a 2024-12-12T05:41:40,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742360_1536 (size=4469) 2024-12-12T05:41:40,979 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/B of 7af3549cbdbd66c1f5a0c758d39edf04 into 476ac6508ba64836ad1b746e8bae7b9a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
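The 512.0 K figure in the repeated "Over memstore limit" rejections is the region's memstore blocking threshold. To my understanding that threshold is the per-region flush size multiplied by the blocking multiplier, which a test can shrink to force exactly this behaviour; the sketch below only illustrates the relationship between the two standard configuration keys. The 128 K value is an assumption chosen so that 128 K x 4 matches the 512 K limit in the log, not a value read from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB); shrunk here to an assumed 128 K.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (the default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit: " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}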
2024-12-12T05:41:40,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:40,979 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/B, priority=13, startTime=1733982100944; duration=0sec 2024-12-12T05:41:40,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:40,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:B 2024-12-12T05:41:40,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:40,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:40,979 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/C is initiating minor compaction (all files) 2024-12-12T05:41:40,979 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/C in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:40,980 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/652e5b4d235b44a3802b8fc1b94e3107, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8047f779c8734d41bd328835cdaf6b32, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/e3331178e478466ab17a38baa58da6c1] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=35.3 K 2024-12-12T05:41:40,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 652e5b4d235b44a3802b8fc1b94e3107, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733982094911 2024-12-12T05:41:40,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 8047f779c8734d41bd328835cdaf6b32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733982096045 2024-12-12T05:41:40,980 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting e3331178e478466ab17a38baa58da6c1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733982098173 2024-12-12T05:41:40,985 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7af3549cbdbd66c1f5a0c758d39edf04#C#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:40,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-12T05:41:40,985 INFO [Thread-2284 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-12T05:41:40,986 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/7407d334f34e4a669772437508578a8b is 50, key is test_row_0/C:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:40,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:40,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-12T05:41:40,987 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:40,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:40,988 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:40,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:40,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742361_1537 (size=12207) 2024-12-12T05:41:40,994 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/7407d334f34e4a669772437508578a8b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/7407d334f34e4a669772437508578a8b 2024-12-12T05:41:40,997 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/C of 7af3549cbdbd66c1f5a0c758d39edf04 into 7407d334f34e4a669772437508578a8b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
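The master-side entries above show a client-requested flush completing as procId 155 and a new FlushTableProcedure being stored as pid=157 with a FlushRegionProcedure subprocedure (pid=158). A flush like this can be requested through the Admin API; the sketch below is a minimal illustration of that call path and is not the test's own code (the class name and the extra compaction call are assumptions).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Asks the master to run a flush procedure for every region of the table,
      // which is what shows up above as FlushTableProcedure/FlushRegionProcedure.
      admin.flush(table);
      // A minor compaction request; the compactions in this log were selected by
      // the region server itself, so this call is shown only for contrast.
      admin.compact(table);
    }
  }
}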
2024-12-12T05:41:40,997 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:40,997 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/C, priority=13, startTime=1733982100944; duration=0sec 2024-12-12T05:41:40,997 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:40,997 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:C 2024-12-12T05:41:41,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982161064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982161066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:41,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982161266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982161268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:41,291 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
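The bursts of Mutate calls above arrive from several client connections at once (ports 51090, 51106, 51112, 51126 and 51160), which is what keeps pushing the memstore back over its limit between flushes. The sketch below is an assumed illustration of such a write load using a BufferedMutator; the row count, value format and class name are invented for the example, while the table, row prefix, family and qualifier come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator =
             conn.getBufferedMutator(TableName.valueOf("TestAcidGuarantees"))) {
      for (int i = 0; i < 1000; i++) {
        // Hammer a small set of rows so the region's memstore fills quickly.
        Put put = new Put(Bytes.toBytes("test_row_" + (i % 10)));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),
            Bytes.toBytes("value-" + i));
        mutator.mutate(put);  // buffered locally, sent to the server in batches
      }
      mutator.flush();        // push any remaining buffered mutations
    }
  }
}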
2024-12-12T05:41:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,371 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:41,373 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ee990e0e4e9c4974ae9c8102fc405d48_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ee990e0e4e9c4974ae9c8102fc405d48_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:41,374 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4eb1479ecbe64779bf4c0531d28cc724, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:41,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4eb1479ecbe64779bf4c0531d28cc724 is 175, key is test_row_0/A:col10/1733982100944/Put/seqid=0 2024-12-12T05:41:41,377 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#A#compaction#451 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:41,378 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/c046502c44f64421a7a0fa98bdf97524 is 175, key is test_row_0/A:col10/1733982098174/Put/seqid=0 2024-12-12T05:41:41,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742362_1538 (size=30955) 2024-12-12T05:41:41,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742363_1539 (size=31161) 2024-12-12T05:41:41,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
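While the flushes, MOB file renames and compactions above churn the store files, the point of TestAcidGuarantees is that a reader of test_row_0 should still see one consistent version across families A, B and C. The sketch below is a hypothetical read-back of that row, not the test's verification code; the class name and the printed output are assumptions, and the table, row, family and qualifier names are the ones appearing in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowReadBackSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Get get = new Get(Bytes.toBytes("test_row_0"));
      get.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      get.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      get.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      Result result = table.get(get);
      // A single Get is served from one view of the row, so the three families
      // should reflect the same write whether it currently lives in the memstore,
      // a freshly flushed HFile or a compacted one.
      for (String family : new String[] {"A", "B", "C"}) {
        byte[] value = result.getValue(Bytes.toBytes(family), Bytes.toBytes("col10"));
        System.out.println(family + ":col10 = "
            + (value == null ? "<missing>" : Bytes.toString(value)));
      }
    }
  }
}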
2024-12-12T05:41:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:41,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982161446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982161448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982161454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982161569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:41,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982161569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:41,596 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:41,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:41,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,748 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:41,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:41,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:41,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:41,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,778 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4eb1479ecbe64779bf4c0531d28cc724 2024-12-12T05:41:41,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/cc7f0eb25ba9480e83249c6ee1d2d85f is 50, key is test_row_0/B:col10/1733982100944/Put/seqid=0 2024-12-12T05:41:41,787 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/c046502c44f64421a7a0fa98bdf97524 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c046502c44f64421a7a0fa98bdf97524 2024-12-12T05:41:41,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742364_1540 (size=12001) 2024-12-12T05:41:41,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/cc7f0eb25ba9480e83249c6ee1d2d85f 2024-12-12T05:41:41,790 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/A of 
7af3549cbdbd66c1f5a0c758d39edf04 into c046502c44f64421a7a0fa98bdf97524(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T05:41:41,791 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04:
2024-12-12T05:41:41,791 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/A, priority=13, startTime=1733982100944; duration=0sec
2024-12-12T05:41:41,791 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T05:41:41,791 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:A
2024-12-12T05:41:41,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/968db790594048f380c5475e5fae77e9 is 50, key is test_row_0/C:col10/1733982100944/Put/seqid=0
2024-12-12T05:41:41,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742365_1541 (size=12001)
2024-12-12T05:41:41,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566
2024-12-12T05:41:41,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158
2024-12-12T05:41:41,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.
2024-12-12T05:41:41,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing
2024-12-12T05:41:41,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.
2024-12-12T05:41:41,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158
java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:41,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:42,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:42,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:42,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:42,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:42,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982162071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:42,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:42,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982162075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:42,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:42,204 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:42,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:42,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:42,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:42,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:42,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
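The RegionTooBusyException records above come from HRegion.checkResources rejecting Mutate calls while the region's memstore sits above its blocking limit (512.0 K in this run; that threshold is typically hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, scaled down here by the test), while the repeated pid=158 failures show the master re-dispatching the flush procedure for a region that reports it is already flushing. A minimal sketch of the client write path that produces these Mutate calls, assuming the stock HBase 2.x Java client; the class name, retry/pause values, and the cell value below are illustrative only and not taken from the test:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer against the TestAcidGuarantees table seen in this log; not part of the test code.
public class TooBusyWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The HBase client retries transient server errors such as RegionTooBusyException on its own;
    // these two settings (illustrative values) bound how long it keeps trying before surfacing an error.
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape as the Mutate calls rejected in the log above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put); // fails only after the client's own retry budget is exhausted
      } catch (IOException e) {
        // The region stayed over its memstore blocking limit for the whole retry window;
        // backing off and resubmitting is the usual response to a sustained RegionTooBusyException.
        Thread.sleep(1000L);
        table.put(put);
      }
    }
  }
}

In practice the client's built-in retries absorb these rejections once a flush drains the memstore, as with the flush that finishes at 05:41:42,217 further down in this log.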
2024-12-12T05:41:42,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:42,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T05:41:42,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/968db790594048f380c5475e5fae77e9
2024-12-12T05:41:42,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4eb1479ecbe64779bf4c0531d28cc724 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724
2024-12-12T05:41:42,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724, entries=150, sequenceid=119, filesize=30.2 K
2024-12-12T05:41:42,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/cc7f0eb25ba9480e83249c6ee1d2d85f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/cc7f0eb25ba9480e83249c6ee1d2d85f
2024-12-12T05:41:42,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/cc7f0eb25ba9480e83249c6ee1d2d85f, entries=150, sequenceid=119, filesize=11.7 K
2024-12-12T05:41:42,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/968db790594048f380c5475e5fae77e9 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/968db790594048f380c5475e5fae77e9
2024-12-12T05:41:42,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/968db790594048f380c5475e5fae77e9, entries=150, sequenceid=119, filesize=11.7 K
2024-12-12T05:41:42,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1272ms, sequenceid=119, compaction requested=false
2024-12-12T05:41:42,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04:
2024-12-12T05:41:42,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to
83e80bf221ca,46457,1733981928566 2024-12-12T05:41:42,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-12T05:41:42,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:42,357 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:41:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:42,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128502c7a939154c4682f99d02a5257c24_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982100958/Put/seqid=0 2024-12-12T05:41:42,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742366_1542 (size=12254) 2024-12-12T05:41:42,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:42,374 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128502c7a939154c4682f99d02a5257c24_7af3549cbdbd66c1f5a0c758d39edf04 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128502c7a939154c4682f99d02a5257c24_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:42,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/65b430ee4eb345c6a30768f4e0690520, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:42,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/65b430ee4eb345c6a30768f4e0690520 is 175, key is test_row_0/A:col10/1733982100958/Put/seqid=0 2024-12-12T05:41:42,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742367_1543 (size=31055) 2024-12-12T05:41:42,778 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/65b430ee4eb345c6a30768f4e0690520 2024-12-12T05:41:42,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/4b412d7303f842abb3eced0ff8e07727 is 50, key is test_row_0/B:col10/1733982100958/Put/seqid=0 2024-12-12T05:41:42,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742368_1544 (size=12101) 2024-12-12T05:41:43,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:43,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:43,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982163102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982163103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,186 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/4b412d7303f842abb3eced0ff8e07727 2024-12-12T05:41:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a96cf69a1edf42729e5564a15540d194 is 50, key is test_row_0/C:col10/1733982100958/Put/seqid=0 2024-12-12T05:41:43,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742369_1545 (size=12101) 2024-12-12T05:41:43,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982163205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982163205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982163409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982163410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982163450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,452 DEBUG [Thread-2282 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4123 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:43,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982163464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,466 DEBUG [Thread-2276 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:43,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982163465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,466 DEBUG [Thread-2274 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:43,593 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a96cf69a1edf42729e5564a15540d194 2024-12-12T05:41:43,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/65b430ee4eb345c6a30768f4e0690520 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520 2024-12-12T05:41:43,599 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520, entries=150, sequenceid=135, filesize=30.3 K 2024-12-12T05:41:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/4b412d7303f842abb3eced0ff8e07727 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/4b412d7303f842abb3eced0ff8e07727 2024-12-12T05:41:43,602 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/4b412d7303f842abb3eced0ff8e07727, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:41:43,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a96cf69a1edf42729e5564a15540d194 as 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a96cf69a1edf42729e5564a15540d194 2024-12-12T05:41:43,605 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a96cf69a1edf42729e5564a15540d194, entries=150, sequenceid=135, filesize=11.8 K 2024-12-12T05:41:43,605 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1248ms, sequenceid=135, compaction requested=true 2024-12-12T05:41:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-12T05:41:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-12T05:41:43,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-12T05:41:43,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6180 sec 2024-12-12T05:41:43,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.6220 sec 2024-12-12T05:41:43,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:43,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:41:43,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:43,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:43,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:43,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:43,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:43,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-12T05:41:43,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212565837bf89db40679798dd50fe8c1ba8_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:43,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742370_1546 (size=12304) 2024-12-12T05:41:43,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982163725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982163726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982163828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982163828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:44,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982164032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:44,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982164032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,123 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:44,125 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212565837bf89db40679798dd50fe8c1ba8_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212565837bf89db40679798dd50fe8c1ba8_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:44,126 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4a3f1079615c477fbe61927640f45bd1, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:44,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4a3f1079615c477fbe61927640f45bd1 is 175, key is test_row_0/A:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:44,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742371_1547 (size=31105) 2024-12-12T05:41:44,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:44,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982164334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:44,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982164335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,529 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4a3f1079615c477fbe61927640f45bd1 2024-12-12T05:41:44,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/a928803aef5542d4bf1d52b231fe694d is 50, key is test_row_0/B:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:44,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742372_1548 (size=12151) 2024-12-12T05:41:44,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:44,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982164836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:44,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982164837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:44,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/a928803aef5542d4bf1d52b231fe694d 2024-12-12T05:41:44,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a930753693e140d18a7eea36c2b16b2c is 50, key is test_row_0/C:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:44,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742373_1549 (size=12151) 2024-12-12T05:41:45,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T05:41:45,091 INFO [Thread-2284 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-12T05:41:45,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:45,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-12T05:41:45,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T05:41:45,093 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:45,093 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:45,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-12T05:41:45,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T05:41:45,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:45,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-12T05:41:45,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:45,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:45,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:45,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:45,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:45,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:45,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a930753693e140d18a7eea36c2b16b2c 2024-12-12T05:41:45,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/4a3f1079615c477fbe61927640f45bd1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1 2024-12-12T05:41:45,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1, entries=150, sequenceid=160, filesize=30.4 K 2024-12-12T05:41:45,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/a928803aef5542d4bf1d52b231fe694d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/a928803aef5542d4bf1d52b231fe694d 2024-12-12T05:41:45,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/a928803aef5542d4bf1d52b231fe694d, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T05:41:45,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a930753693e140d18a7eea36c2b16b2c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a930753693e140d18a7eea36c2b16b2c 2024-12-12T05:41:45,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a930753693e140d18a7eea36c2b16b2c, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T05:41:45,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1644ms, sequenceid=160, compaction requested=true 2024-12-12T05:41:45,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:45,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:45,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:45,357 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:45,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:45,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:45,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:45,358 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:45,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:45,358 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:45,358 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124276 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:45,358 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/B is initiating minor compaction (all files) 2024-12-12T05:41:45,358 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/A is initiating minor compaction (all files) 2024-12-12T05:41:45,358 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/B in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:45,358 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/A in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:45,359 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/476ac6508ba64836ad1b746e8bae7b9a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/cc7f0eb25ba9480e83249c6ee1d2d85f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/4b412d7303f842abb3eced0ff8e07727, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/a928803aef5542d4bf1d52b231fe694d] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=47.3 K 2024-12-12T05:41:45,359 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c046502c44f64421a7a0fa98bdf97524, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=121.4 K 2024-12-12T05:41:45,359 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c046502c44f64421a7a0fa98bdf97524, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1] 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 476ac6508ba64836ad1b746e8bae7b9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733982098173 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c046502c44f64421a7a0fa98bdf97524, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733982098173 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4eb1479ecbe64779bf4c0531d28cc724, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733982099297 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting cc7f0eb25ba9480e83249c6ee1d2d85f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733982099297 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65b430ee4eb345c6a30768f4e0690520, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733982100958 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b412d7303f842abb3eced0ff8e07727, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733982100958 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a3f1079615c477fbe61927640f45bd1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733982103097 2024-12-12T05:41:45,359 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a928803aef5542d4bf1d52b231fe694d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733982103097 2024-12-12T05:41:45,363 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:45,364 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#B#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:45,364 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/dfbb5bbd2f344692850300ff7da3690f is 50, key is test_row_0/B:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:45,368 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412122c31a646411d4297a79696091e9ca091_7af3549cbdbd66c1f5a0c758d39edf04 store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:45,370 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412122c31a646411d4297a79696091e9ca091_7af3549cbdbd66c1f5a0c758d39edf04, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:45,370 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122c31a646411d4297a79696091e9ca091_7af3549cbdbd66c1f5a0c758d39edf04 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:45,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742374_1550 (size=12493) 2024-12-12T05:41:45,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742375_1551 (size=4469) 2024-12-12T05:41:45,375 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/dfbb5bbd2f344692850300ff7da3690f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/dfbb5bbd2f344692850300ff7da3690f 2024-12-12T05:41:45,378 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/B of 7af3549cbdbd66c1f5a0c758d39edf04 into dfbb5bbd2f344692850300ff7da3690f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:45,378 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:45,379 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/B, priority=12, startTime=1733982105357; duration=0sec 2024-12-12T05:41:45,379 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:45,379 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:B 2024-12-12T05:41:45,379 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:45,379 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:45,379 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/C is initiating minor compaction (all files) 2024-12-12T05:41:45,380 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/C in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:45,380 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/7407d334f34e4a669772437508578a8b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/968db790594048f380c5475e5fae77e9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a96cf69a1edf42729e5564a15540d194, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a930753693e140d18a7eea36c2b16b2c] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=47.3 K 2024-12-12T05:41:45,380 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 7407d334f34e4a669772437508578a8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733982098173 2024-12-12T05:41:45,380 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 968db790594048f380c5475e5fae77e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733982099297 2024-12-12T05:41:45,380 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a96cf69a1edf42729e5564a15540d194, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, 
compression=NONE, seqNum=135, earliestPutTs=1733982100958 2024-12-12T05:41:45,380 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting a930753693e140d18a7eea36c2b16b2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733982103097 2024-12-12T05:41:45,385 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#C#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:45,386 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/64187b422f7a492ca1208a464babff9c is 50, key is test_row_0/C:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742376_1552 (size=12493) 2024-12-12T05:41:45,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T05:41:45,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:45,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:45,397 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:45,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:45,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121293d6940bc5bc43df8d80ddc0d508d666_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982103725/Put/seqid=0 2024-12-12T05:41:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742377_1553 (size=12304) 2024-12-12T05:41:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T05:41:45,774 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#A#compaction#463 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:45,774 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/345edd1a11b841018869a7a253f4f9cf is 175, key is test_row_0/A:col10/1733982103712/Put/seqid=0 2024-12-12T05:41:45,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742378_1554 (size=31447) 2024-12-12T05:41:45,792 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/64187b422f7a492ca1208a464babff9c as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/64187b422f7a492ca1208a464babff9c 2024-12-12T05:41:45,794 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/C of 7af3549cbdbd66c1f5a0c758d39edf04 into 64187b422f7a492ca1208a464babff9c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:45,794 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:45,794 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/C, priority=12, startTime=1733982105357; duration=0sec 2024-12-12T05:41:45,794 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:45,794 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:C 2024-12-12T05:41:45,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,807 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121293d6940bc5bc43df8d80ddc0d508d666_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121293d6940bc5bc43df8d80ddc0d508d666_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:45,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/c9300a5295d8438cb27ae59b75f88cf6, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:45,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/c9300a5295d8438cb27ae59b75f88cf6 is 175, key is test_row_0/A:col10/1733982103725/Put/seqid=0 2024-12-12T05:41:45,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742379_1555 (size=31105) 2024-12-12T05:41:45,810 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/c9300a5295d8438cb27ae59b75f88cf6 2024-12-12T05:41:45,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/af31bc4aa8474673969b40b77a1eb497 is 50, key is test_row_0/B:col10/1733982103725/Put/seqid=0 2024-12-12T05:41:45,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742380_1556 (size=12151) 2024-12-12T05:41:45,828 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/af31bc4aa8474673969b40b77a1eb497 2024-12-12T05:41:45,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/fdd51d443af94dc898a540de63fb9aa7 is 50, key is test_row_0/C:col10/1733982103725/Put/seqid=0 2024-12-12T05:41:45,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742381_1557 (size=12151) 2024-12-12T05:41:45,836 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/fdd51d443af94dc898a540de63fb9aa7 2024-12-12T05:41:45,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/c9300a5295d8438cb27ae59b75f88cf6 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6 2024-12-12T05:41:45,842 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6, entries=150, sequenceid=172, filesize=30.4 K 2024-12-12T05:41:45,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/af31bc4aa8474673969b40b77a1eb497 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/af31bc4aa8474673969b40b77a1eb497 2024-12-12T05:41:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:45,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,846 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/af31bc4aa8474673969b40b77a1eb497, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T05:41:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/fdd51d443af94dc898a540de63fb9aa7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/fdd51d443af94dc898a540de63fb9aa7 2024-12-12T05:41:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,849 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/fdd51d443af94dc898a540de63fb9aa7, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T05:41:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=6.71 KB/6870 for 7af3549cbdbd66c1f5a0c758d39edf04 in 453ms, sequenceid=172, compaction requested=false 2024-12-12T05:41:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-12T05:41:45,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 758 msec 2024-12-12T05:41:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 760 msec 2024-12-12T05:41:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T05:41:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:45,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-12T05:41:45,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:45,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:45,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:45,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c6c4e6dadcd44c4c9b8b5e6cb11df539_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:45,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742383_1559 (size=24758) 2024-12-12T05:41:45,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982165907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:45,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982165907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982166010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982166010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,181 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/345edd1a11b841018869a7a253f4f9cf as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/345edd1a11b841018869a7a253f4f9cf 2024-12-12T05:41:46,184 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/A of 7af3549cbdbd66c1f5a0c758d39edf04 into 345edd1a11b841018869a7a253f4f9cf(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:46,184 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:46,184 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/A, priority=12, startTime=1733982105357; duration=0sec 2024-12-12T05:41:46,184 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:46,184 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:A 2024-12-12T05:41:46,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-12T05:41:46,196 INFO [Thread-2284 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-12T05:41:46,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:46,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-12T05:41:46,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:46,197 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:46,198 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:46,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:46,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982166212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982166212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,285 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:46,287 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c6c4e6dadcd44c4c9b8b5e6cb11df539_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c6c4e6dadcd44c4c9b8b5e6cb11df539_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:46,288 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/420c0ef6842b4031910b8e7d6df91666, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:46,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/420c0ef6842b4031910b8e7d6df91666 is 175, key is test_row_0/A:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742382_1558 (size=74395) 2024-12-12T05:41:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:46,349 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:46,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:46,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:46,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:46,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:46,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:46,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:46,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:46,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:46,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982166514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:46,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982166516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:46,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T05:41:46,693 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=185, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/420c0ef6842b4031910b8e7d6df91666 2024-12-12T05:41:46,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/0200cf1bf01d423d8f7048db297d3bcc is 50, key is test_row_0/B:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:46,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742384_1560 (size=12151) 2024-12-12T05:41:46,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/0200cf1bf01d423d8f7048db297d3bcc 2024-12-12T05:41:46,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/d6867165bd364852a42779da1e75d7ad is 50, key is test_row_0/C:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:46,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742385_1561 (size=12151) 2024-12-12T05:41:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:46,805 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:46,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:46,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:46,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:46,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,957 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:46,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:46,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:46,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:46,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:46,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:46,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T05:41:47,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982167020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982167021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/d6867165bd364852a42779da1e75d7ad 2024-12-12T05:41:47,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/420c0ef6842b4031910b8e7d6df91666 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666 2024-12-12T05:41:47,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:47,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:47,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:47,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:47,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:47,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:47,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:47,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666, entries=400, sequenceid=185, filesize=72.7 K 2024-12-12T05:41:47,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/0200cf1bf01d423d8f7048db297d3bcc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/0200cf1bf01d423d8f7048db297d3bcc 2024-12-12T05:41:47,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/0200cf1bf01d423d8f7048db297d3bcc, entries=150, sequenceid=185, filesize=11.9 K 2024-12-12T05:41:47,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/d6867165bd364852a42779da1e75d7ad as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d6867165bd364852a42779da1e75d7ad 2024-12-12T05:41:47,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d6867165bd364852a42779da1e75d7ad, entries=150, sequenceid=185, filesize=11.9 K 
2024-12-12T05:41:47,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1243ms, sequenceid=185, compaction requested=true 2024-12-12T05:41:47,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:47,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:47,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:47,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:47,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:47,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:47,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:47,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T05:41:47,119 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:47,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:47,119 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/A is initiating minor compaction (all files) 2024-12-12T05:41:47,119 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/A in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:47,120 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/345edd1a11b841018869a7a253f4f9cf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=133.7 K 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/B is initiating minor compaction (all files) 2024-12-12T05:41:47,120 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/345edd1a11b841018869a7a253f4f9cf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666] 2024-12-12T05:41:47,120 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/B in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:47,120 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/dfbb5bbd2f344692850300ff7da3690f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/af31bc4aa8474673969b40b77a1eb497, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/0200cf1bf01d423d8f7048db297d3bcc] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=35.9 K 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 345edd1a11b841018869a7a253f4f9cf, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733982103097 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting dfbb5bbd2f344692850300ff7da3690f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733982103097 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9300a5295d8438cb27ae59b75f88cf6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733982103719 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting af31bc4aa8474673969b40b77a1eb497, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733982103719 2024-12-12T05:41:47,120 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 420c0ef6842b4031910b8e7d6df91666, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733982105854 2024-12-12T05:41:47,122 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 0200cf1bf01d423d8f7048db297d3bcc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733982105871 2024-12-12T05:41:47,127 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:47,128 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121288d63acc0fe3401d875f358fbfe10789_7af3549cbdbd66c1f5a0c758d39edf04 store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:47,130 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#B#compaction#472 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:47,130 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/5365ed7fd8214015a591197d1dd67404 is 50, key is test_row_0/B:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:47,131 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121288d63acc0fe3401d875f358fbfe10789_7af3549cbdbd66c1f5a0c758d39edf04, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:47,131 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121288d63acc0fe3401d875f358fbfe10789_7af3549cbdbd66c1f5a0c758d39edf04 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:47,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742386_1562 (size=12595) 2024-12-12T05:41:47,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742387_1563 (size=4469) 2024-12-12T05:41:47,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T05:41:47,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:47,262 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:41:47,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:47,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:47,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:47,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:47,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:47,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:47,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128548f7c2e9d24494a9474cf63c9a7a22_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982105906/Put/seqid=0 2024-12-12T05:41:47,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742388_1564 (size=12304) 2024-12-12T05:41:47,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:47,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:47,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:47,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982167485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982167486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51112 deadline: 1733982167492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,495 DEBUG [Thread-2276 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:47,536 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#A#compaction#471 average throughput is 0.06 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:47,537 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/0f1fc15db72d4e848495a2ab0c22dcac is 175, key is test_row_0/A:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:47,537 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/5365ed7fd8214015a591197d1dd67404 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5365ed7fd8214015a591197d1dd67404 2024-12-12T05:41:47,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742389_1565 (size=31549) 2024-12-12T05:41:47,540 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/B of 7af3549cbdbd66c1f5a0c758d39edf04 into 5365ed7fd8214015a591197d1dd67404(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:47,540 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:47,540 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/B, priority=13, startTime=1733982107119; duration=0sec 2024-12-12T05:41:47,540 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:47,540 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:B 2024-12-12T05:41:47,540 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:47,541 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:47,541 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/C is initiating minor compaction (all files) 2024-12-12T05:41:47,542 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/C in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:47,542 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/64187b422f7a492ca1208a464babff9c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/fdd51d443af94dc898a540de63fb9aa7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d6867165bd364852a42779da1e75d7ad] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=35.9 K 2024-12-12T05:41:47,542 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 64187b422f7a492ca1208a464babff9c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733982103097 2024-12-12T05:41:47,542 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting fdd51d443af94dc898a540de63fb9aa7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733982103719 2024-12-12T05:41:47,542 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting d6867165bd364852a42779da1e75d7ad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733982105871 2024-12-12T05:41:47,543 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/0f1fc15db72d4e848495a2ab0c22dcac as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/0f1fc15db72d4e848495a2ab0c22dcac 2024-12-12T05:41:47,546 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/A of 7af3549cbdbd66c1f5a0c758d39edf04 into 0f1fc15db72d4e848495a2ab0c22dcac(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:47,546 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:47,546 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/A, priority=13, startTime=1733982107118; duration=0sec 2024-12-12T05:41:47,546 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:47,546 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:A 2024-12-12T05:41:47,547 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#C#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:47,547 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/39fdbf943e8740a0a958645b322ac2dc is 50, key is test_row_0/C:col10/1733982105875/Put/seqid=0 2024-12-12T05:41:47,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742390_1566 (size=12595) 2024-12-12T05:41:47,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982167588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982167589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:47,673 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128548f7c2e9d24494a9474cf63c9a7a22_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128548f7c2e9d24494a9474cf63c9a7a22_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:47,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/6abce5b28e3a45d396f25fb19dec2738, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:47,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/6abce5b28e3a45d396f25fb19dec2738 is 175, key is test_row_0/A:col10/1733982105906/Put/seqid=0 2024-12-12T05:41:47,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742391_1567 (size=31105) 2024-12-12T05:41:47,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982167790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:47,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982167791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:47,959 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/39fdbf943e8740a0a958645b322ac2dc as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/39fdbf943e8740a0a958645b322ac2dc 2024-12-12T05:41:47,961 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/C of 7af3549cbdbd66c1f5a0c758d39edf04 into 39fdbf943e8740a0a958645b322ac2dc(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:47,962 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:47,962 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/C, priority=13, startTime=1733982107119; duration=0sec 2024-12-12T05:41:47,962 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:47,962 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:C 2024-12-12T05:41:48,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982168027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982168027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,077 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/6abce5b28e3a45d396f25fb19dec2738 2024-12-12T05:41:48,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/37f917d1b1df49f1a2efcacf9af09f92 is 50, key is test_row_0/B:col10/1733982105906/Put/seqid=0 2024-12-12T05:41:48,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742392_1568 (size=12151) 2024-12-12T05:41:48,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982168094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982168095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:48,485 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/37f917d1b1df49f1a2efcacf9af09f92 2024-12-12T05:41:48,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1b59c33262d44818b90a49fa736bb3b7 is 50, key is test_row_0/C:col10/1733982105906/Put/seqid=0 2024-12-12T05:41:48,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742393_1569 (size=12151) 2024-12-12T05:41:48,492 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1b59c33262d44818b90a49fa736bb3b7 2024-12-12T05:41:48,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/6abce5b28e3a45d396f25fb19dec2738 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738 2024-12-12T05:41:48,503 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738, entries=150, sequenceid=210, filesize=30.4 K 2024-12-12T05:41:48,503 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/37f917d1b1df49f1a2efcacf9af09f92 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/37f917d1b1df49f1a2efcacf9af09f92 2024-12-12T05:41:48,506 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/37f917d1b1df49f1a2efcacf9af09f92, entries=150, sequenceid=210, filesize=11.9 K 2024-12-12T05:41:48,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1b59c33262d44818b90a49fa736bb3b7 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1b59c33262d44818b90a49fa736bb3b7 2024-12-12T05:41:48,509 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1b59c33262d44818b90a49fa736bb3b7, entries=150, sequenceid=210, filesize=11.9 K 2024-12-12T05:41:48,510 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1248ms, sequenceid=210, compaction requested=false 2024-12-12T05:41:48,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:48,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:48,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-12T05:41:48,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-12T05:41:48,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-12T05:41:48,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3130 sec 2024-12-12T05:41:48,513 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.3170 sec 2024-12-12T05:41:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:48,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T05:41:48,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:48,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:48,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:48,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:48,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:48,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:48,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121248f8821b42af42df89ee21a562a967b1_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742394_1570 (size=12304) 2024-12-12T05:41:48,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982168619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982168621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982168722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982168723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982168924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:48,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:48,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982168925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:49,010 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:49,012 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121248f8821b42af42df89ee21a562a967b1_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121248f8821b42af42df89ee21a562a967b1_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:49,013 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9ec4ca5bcba3462f9bc8cad9149d1870, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:49,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9ec4ca5bcba3462f9bc8cad9149d1870 is 175, key is test_row_0/A:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:49,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742395_1571 (size=31105) 2024-12-12T05:41:49,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:49,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982169227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:49,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:49,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982169227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:49,416 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=226, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9ec4ca5bcba3462f9bc8cad9149d1870 2024-12-12T05:41:49,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/b4e3c05b2f9a41a283fb5e4a854f30c5 is 50, key is test_row_0/B:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:49,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742396_1572 (size=12151) 2024-12-12T05:41:49,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/b4e3c05b2f9a41a283fb5e4a854f30c5 2024-12-12T05:41:49,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/f564de5394e140b4848d0efac71774b3 is 50, key is test_row_0/C:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:49,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742397_1573 (size=12151) 2024-12-12T05:41:49,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982169731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:49,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982169731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:49,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/f564de5394e140b4848d0efac71774b3 2024-12-12T05:41:49,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9ec4ca5bcba3462f9bc8cad9149d1870 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870 2024-12-12T05:41:49,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870, entries=150, sequenceid=226, filesize=30.4 K 2024-12-12T05:41:49,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/b4e3c05b2f9a41a283fb5e4a854f30c5 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/b4e3c05b2f9a41a283fb5e4a854f30c5 2024-12-12T05:41:49,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/b4e3c05b2f9a41a283fb5e4a854f30c5, entries=150, sequenceid=226, filesize=11.9 K 2024-12-12T05:41:49,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/f564de5394e140b4848d0efac71774b3 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/f564de5394e140b4848d0efac71774b3 2024-12-12T05:41:49,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/f564de5394e140b4848d0efac71774b3, entries=150, sequenceid=226, filesize=11.9 K 2024-12-12T05:41:49,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1244ms, sequenceid=226, compaction requested=true 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:49,843 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:49,843 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:49,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:49,843 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:49,843 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/A is initiating minor compaction (all files) 2024-12-12T05:41:49,843 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/A in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:49,844 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/0f1fc15db72d4e848495a2ab0c22dcac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=91.6 K 2024-12-12T05:41:49,844 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/0f1fc15db72d4e848495a2ab0c22dcac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870] 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/B is initiating minor compaction (all files) 2024-12-12T05:41:49,844 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/B in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f1fc15db72d4e848495a2ab0c22dcac, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733982105871 2024-12-12T05:41:49,844 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5365ed7fd8214015a591197d1dd67404, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/37f917d1b1df49f1a2efcacf9af09f92, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/b4e3c05b2f9a41a283fb5e4a854f30c5] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=36.0 K 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6abce5b28e3a45d396f25fb19dec2738, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733982105901 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5365ed7fd8214015a591197d1dd67404, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733982105871 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ec4ca5bcba3462f9bc8cad9149d1870, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733982107481 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 37f917d1b1df49f1a2efcacf9af09f92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733982105901 2024-12-12T05:41:49,844 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting b4e3c05b2f9a41a283fb5e4a854f30c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733982107481 2024-12-12T05:41:49,852 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:49,852 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#B#compaction#480 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:49,853 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/46f8e70fd6164b2cb35dca049723ae8b is 50, key is test_row_0/B:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:49,854 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121258532f52de0d450e86188fd2e0b9d242_7af3549cbdbd66c1f5a0c758d39edf04 store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:49,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742398_1574 (size=12697) 2024-12-12T05:41:49,856 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121258532f52de0d450e86188fd2e0b9d242_7af3549cbdbd66c1f5a0c758d39edf04, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:49,856 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121258532f52de0d450e86188fd2e0b9d242_7af3549cbdbd66c1f5a0c758d39edf04 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:49,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742399_1575 (size=4469) 2024-12-12T05:41:49,862 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#A#compaction#481 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:49,862 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/71f279d707924e9089eace71bd46e25a is 175, key is test_row_0/A:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:49,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742400_1576 (size=31651) 2024-12-12T05:41:50,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:50,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T05:41:50,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:50,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:50,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:50,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:50,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:50,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:50,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ba4ebc4009864b6baae26b919236bb68_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982108615/Put/seqid=0 2024-12-12T05:41:50,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742401_1577 (size=14794) 2024-12-12T05:41:50,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982170058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982170059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982170160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982170161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,259 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/46f8e70fd6164b2cb35dca049723ae8b as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/46f8e70fd6164b2cb35dca049723ae8b 2024-12-12T05:41:50,262 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/B of 7af3549cbdbd66c1f5a0c758d39edf04 into 46f8e70fd6164b2cb35dca049723ae8b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:50,262 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:50,262 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/B, priority=13, startTime=1733982109843; duration=0sec 2024-12-12T05:41:50,262 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:50,262 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:B 2024-12-12T05:41:50,262 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T05:41:50,263 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T05:41:50,263 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/C is initiating minor compaction (all files) 2024-12-12T05:41:50,263 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/C in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,263 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/39fdbf943e8740a0a958645b322ac2dc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1b59c33262d44818b90a49fa736bb3b7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/f564de5394e140b4848d0efac71774b3] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=36.0 K 2024-12-12T05:41:50,263 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 39fdbf943e8740a0a958645b322ac2dc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733982105871 2024-12-12T05:41:50,263 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b59c33262d44818b90a49fa736bb3b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733982105901 2024-12-12T05:41:50,264 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting f564de5394e140b4848d0efac71774b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733982107481 2024-12-12T05:41:50,269 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7af3549cbdbd66c1f5a0c758d39edf04#C#compaction#483 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:50,269 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/71f279d707924e9089eace71bd46e25a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/71f279d707924e9089eace71bd46e25a 2024-12-12T05:41:50,269 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a3d7d2e3137c439783f21f0fc583bf58 is 50, key is test_row_0/C:col10/1733982107485/Put/seqid=0 2024-12-12T05:41:50,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742402_1578 (size=12697) 2024-12-12T05:41:50,273 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/A of 7af3549cbdbd66c1f5a0c758d39edf04 into 71f279d707924e9089eace71bd46e25a(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:50,273 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:50,273 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/A, priority=13, startTime=1733982109843; duration=0sec 2024-12-12T05:41:50,273 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:50,273 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:A 2024-12-12T05:41:50,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T05:41:50,301 INFO [Thread-2284 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-12T05:41:50,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T05:41:50,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-12T05:41:50,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:50,303 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T05:41:50,303 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T05:41:50,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T05:41:50,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982170362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982170363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:50,454 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:50,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:50,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,455 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:50,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,459 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ba4ebc4009864b6baae26b919236bb68_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ba4ebc4009864b6baae26b919236bb68_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:50,460 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/5d11e9e09dc7411eb7c0793803940ff4, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:50,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/5d11e9e09dc7411eb7c0793803940ff4 is 175, key is test_row_0/A:col10/1733982108615/Put/seqid=0 2024-12-12T05:41:50,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742403_1579 (size=39749) 2024-12-12T05:41:50,464 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/5d11e9e09dc7411eb7c0793803940ff4 2024-12-12T05:41:50,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/21db5cef677b4e1aa5f04231af885ffe is 50, key is test_row_0/B:col10/1733982108615/Put/seqid=0 2024-12-12T05:41:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742404_1580 (size=12151) 2024-12-12T05:41:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:50,607 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
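
The flush activity above (Client=jenkins requesting "flush TestAcidGuarantees", the master storing a FlushTableProcedure as pid=163, and the repeated "Checking to see if procedure is done pid=163" polling) corresponds to an ordinary table flush issued through the public Admin API. A minimal sketch of issuing such a flush from client code, assuming a reachable cluster; the quorum address below is illustrative and not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed quorum address for illustration; the cluster in this log is an
        // in-process test cluster, so this value is not from the log.
        conf.set("hbase.zookeeper.quorum", "localhost");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Submits a flush-table procedure on the master and waits for it to
          // finish; while waiting, the client polls the master, which is what the
          // "Checking to see if procedure is done pid=..." lines reflect.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

When the region server is already flushing the region (the "NOT flushing ... as already flushing" and "Unable to complete flush" entries above), the region-level subprocedure fails and the master redispatches it until the in-flight flush completes.
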
2024-12-12T05:41:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982170665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982170665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,675 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/a3d7d2e3137c439783f21f0fc583bf58 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a3d7d2e3137c439783f21f0fc583bf58 2024-12-12T05:41:50,678 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/C of 7af3549cbdbd66c1f5a0c758d39edf04 into a3d7d2e3137c439783f21f0fc583bf58(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:50,678 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:50,678 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/C, priority=13, startTime=1733982109843; duration=0sec 2024-12-12T05:41:50,678 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:50,678 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:C 2024-12-12T05:41:50,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982170740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:50,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982170741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
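
The repeated RegionTooBusyException warnings above show the region rejecting writes while its memstore is over the 512.0 K blocking limit and a flush is in progress. The standard HBase client normally retries such exceptions internally; the sketch below assumes client retries are tuned low enough for the exception to reach application code, and shows one way a caller might back off and retry a Put. Row, family, and qualifier names mirror the test rows seen in this log; the class and method names are illustrative only:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BackoffPutExample {
      // Retries a single Put with exponential backoff when the region reports it
      // is too busy (memstore above its blocking limit, as in the log above).
      static void putWithBackoff(Table table, Put put)
          throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // The server is flushing or compacting; wait and try again.
            Thread.sleep(sleepMs);
            sleepMs *= 2;
          }
        }
        throw new IOException("region still too busy after retries");
      }

      static Put examplePut() {
        // Mirrors the test rows above: row "test_row_0", family A, qualifier "col10".
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        return put;
      }
    }

This is only a sketch of the back-pressure pattern; with default client settings the retrying happens inside the HBase client and the caller would instead see a retries-exhausted failure if the region stays busy too long.
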
2024-12-12T05:41:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/21db5cef677b4e1aa5f04231af885ffe 2024-12-12T05:41:50,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1fda93068fa44add87f4f29c735028be is 50, key is test_row_0/C:col10/1733982108615/Put/seqid=0 2024-12-12T05:41:50,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742405_1581 (size=12151) 2024-12-12T05:41:50,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:50,912 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:50,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:50,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:50,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:51,064 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:51,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:51,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:51,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:51,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:51,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:51,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:51,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:51,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:51,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982171167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:51,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:51,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982171168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:51,216 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:51,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:51,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:51,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
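
The "Over memstore limit=512.0 K" figure in the exceptions above is the per-region blocking limit, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The exact settings used by this test are not shown in this section, so the values in the sketch below are assumptions chosen only to reproduce the same 512 K limit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: a 128 KB flush size with the default multiplier of 4
        // yields the 512 K blocking limit reported in the RegionTooBusyException
        // messages above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Per-region blocking memstore limit: " + blockingLimit + " bytes");
      }
    }

Once the pending flush completes and the memstore drops back below this limit (the "Finished flush of dataSize ..." entry below), writes to the region are accepted again.
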
2024-12-12T05:41:51,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T05:41:51,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1fda93068fa44add87f4f29c735028be 2024-12-12T05:41:51,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/5d11e9e09dc7411eb7c0793803940ff4 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4 2024-12-12T05:41:51,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4, entries=200, sequenceid=250, filesize=38.8 K 2024-12-12T05:41:51,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/21db5cef677b4e1aa5f04231af885ffe as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/21db5cef677b4e1aa5f04231af885ffe 2024-12-12T05:41:51,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/21db5cef677b4e1aa5f04231af885ffe, entries=150, sequenceid=250, filesize=11.9 K 2024-12-12T05:41:51,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1fda93068fa44add87f4f29c735028be as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1fda93068fa44add87f4f29c735028be 2024-12-12T05:41:51,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1fda93068fa44add87f4f29c735028be, entries=150, sequenceid=250, filesize=11.9 K 2024-12-12T05:41:51,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1249ms, sequenceid=250, compaction requested=false 2024-12-12T05:41:51,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:51,369 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
83e80bf221ca,46457,1733981928566 2024-12-12T05:41:51,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46457 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:51,370 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:51,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:51,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121204ceb30334cd4a70be531585348870f0_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982110057/Put/seqid=0 2024-12-12T05:41:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742406_1582 (size=12404) 2024-12-12T05:41:51,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:51,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:51,780 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121204ceb30334cd4a70be531585348870f0_7af3549cbdbd66c1f5a0c758d39edf04 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121204ceb30334cd4a70be531585348870f0_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:51,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/1ca012331fa340e7b9fa05cc61c957d0, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:51,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/1ca012331fa340e7b9fa05cc61c957d0 is 175, key is test_row_0/A:col10/1733982110057/Put/seqid=0 2024-12-12T05:41:51,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742407_1583 (size=31205) 2024-12-12T05:41:52,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:52,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. as already flushing 2024-12-12T05:41:52,197 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=265, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/1ca012331fa340e7b9fa05cc61c957d0 2024-12-12T05:41:52,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/d51cd074b89d4d5e974975a9b45c2989 is 50, key is test_row_0/B:col10/1733982110057/Put/seqid=0 2024-12-12T05:41:52,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982172201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742408_1584 (size=12251) 2024-12-12T05:41:52,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982172204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982172304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982172306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:52,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982172507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982172509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,605 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/d51cd074b89d4d5e974975a9b45c2989 2024-12-12T05:41:52,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/5c8c0a1f7e5e420eb1836de2c30814d2 is 50, key is test_row_0/C:col10/1733982110057/Put/seqid=0 2024-12-12T05:41:52,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742409_1585 (size=12251) 2024-12-12T05:41:52,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51126 deadline: 1733982172754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,755 DEBUG [Thread-2282 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:52,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51160 deadline: 1733982172758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,759 DEBUG [Thread-2274 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., hostname=83e80bf221ca,46457,1733981928566, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T05:41:52,764 DEBUG [Thread-2293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7287c75d to 127.0.0.1:60303 2024-12-12T05:41:52,764 DEBUG [Thread-2291 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x5f48b1c2 to 127.0.0.1:60303 2024-12-12T05:41:52,764 DEBUG [Thread-2293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:52,764 DEBUG [Thread-2291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:52,766 DEBUG [Thread-2285 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d832d43 to 127.0.0.1:60303 2024-12-12T05:41:52,766 DEBUG [Thread-2285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:52,766 DEBUG [Thread-2289 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x439b60d5 to 127.0.0.1:60303 2024-12-12T05:41:52,766 DEBUG [Thread-2289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:52,766 DEBUG [Thread-2287 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15b6349f to 127.0.0.1:60303 2024-12-12T05:41:52,766 DEBUG [Thread-2287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:52,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51090 deadline: 1733982172809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:52,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T05:41:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51106 deadline: 1733982172811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:53,014 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/5c8c0a1f7e5e420eb1836de2c30814d2 2024-12-12T05:41:53,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/1ca012331fa340e7b9fa05cc61c957d0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0 2024-12-12T05:41:53,027 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0, entries=150, sequenceid=265, filesize=30.5 K 2024-12-12T05:41:53,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/d51cd074b89d4d5e974975a9b45c2989 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51cd074b89d4d5e974975a9b45c2989 2024-12-12T05:41:53,031 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51cd074b89d4d5e974975a9b45c2989, entries=150, sequenceid=265, filesize=12.0 K 2024-12-12T05:41:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 
{event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/5c8c0a1f7e5e420eb1836de2c30814d2 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/5c8c0a1f7e5e420eb1836de2c30814d2 2024-12-12T05:41:53,036 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/5c8c0a1f7e5e420eb1836de2c30814d2, entries=150, sequenceid=265, filesize=12.0 K 2024-12-12T05:41:53,037 INFO [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1667ms, sequenceid=265, compaction requested=true 2024-12-12T05:41:53,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:53,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:53,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83e80bf221ca:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-12T05:41:53,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-12T05:41:53,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-12T05:41:53,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7350 sec 2024-12-12T05:41:53,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.7380 sec 2024-12-12T05:41:53,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46457 {}] regionserver.HRegion(8581): Flush requested on 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:53,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T05:41:53,318 DEBUG [Thread-2280 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:60303 2024-12-12T05:41:53,318 DEBUG [Thread-2280 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:53,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:53,320 DEBUG [Thread-2278 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:60303 2024-12-12T05:41:53,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-12T05:41:53,320 DEBUG [Thread-2278 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:53,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:53,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:53,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:53,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:53,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d6f0caf5d263438082f5ec320816c6e5_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:53,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742410_1586 (size=12454) 2024-12-12T05:41:53,732 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:53,738 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d6f0caf5d263438082f5ec320816c6e5_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d6f0caf5d263438082f5ec320816c6e5_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:53,739 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9cb8df20c2814132a214811dd2bcf920, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:53,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9cb8df20c2814132a214811dd2bcf920 is 175, key is test_row_0/A:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:53,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742411_1587 (size=31255) 2024-12-12T05:41:54,145 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9cb8df20c2814132a214811dd2bcf920 2024-12-12T05:41:54,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/22533c33bb774e3ba8f54ed35a6eb44d is 50, key is test_row_0/B:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:54,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742412_1588 (size=12301) 2024-12-12T05:41:54,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T05:41:54,409 INFO [Thread-2284 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-12T05:41:54,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/22533c33bb774e3ba8f54ed35a6eb44d 2024-12-12T05:41:54,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1d203fa673c0427a956e870408eb7b02 is 50, key is test_row_0/C:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:54,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742413_1589 (size=12301) 2024-12-12T05:41:54,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1d203fa673c0427a956e870408eb7b02 2024-12-12T05:41:54,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/9cb8df20c2814132a214811dd2bcf920 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920 2024-12-12T05:41:54,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920, entries=150, sequenceid=291, filesize=30.5 K 2024-12-12T05:41:54,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/22533c33bb774e3ba8f54ed35a6eb44d as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/22533c33bb774e3ba8f54ed35a6eb44d 2024-12-12T05:41:54,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/22533c33bb774e3ba8f54ed35a6eb44d, entries=150, sequenceid=291, filesize=12.0 K 2024-12-12T05:41:54,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/1d203fa673c0427a956e870408eb7b02 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1d203fa673c0427a956e870408eb7b02 2024-12-12T05:41:54,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1d203fa673c0427a956e870408eb7b02, entries=150, sequenceid=291, filesize=12.0 K 2024-12-12T05:41:54,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=0 B/0 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1680ms, sequenceid=291, compaction requested=true 2024-12-12T05:41:54,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:54,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T05:41:54,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:54,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T05:41:54,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:54,997 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:54,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7af3549cbdbd66c1f5a0c758d39edf04:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T05:41:54,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:54,998 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:54,998 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133860 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:54,998 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of 
size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:54,998 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/A is initiating minor compaction (all files) 2024-12-12T05:41:54,998 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/B is initiating minor compaction (all files) 2024-12-12T05:41:54,998 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/B in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:54,998 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/A in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:54,998 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/46f8e70fd6164b2cb35dca049723ae8b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/21db5cef677b4e1aa5f04231af885ffe, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51cd074b89d4d5e974975a9b45c2989, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/22533c33bb774e3ba8f54ed35a6eb44d] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=48.2 K 2024-12-12T05:41:54,998 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/71f279d707924e9089eace71bd46e25a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=130.7 K 2024-12-12T05:41:54,998 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:54,998 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. files: [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/71f279d707924e9089eace71bd46e25a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920] 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46f8e70fd6164b2cb35dca049723ae8b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733982107481 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 71f279d707924e9089eace71bd46e25a, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733982107481 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21db5cef677b4e1aa5f04231af885ffe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733982108615 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d11e9e09dc7411eb7c0793803940ff4, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733982108615 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting d51cd074b89d4d5e974975a9b45c2989, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733982110051 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ca012331fa340e7b9fa05cc61c957d0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733982110051 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22533c33bb774e3ba8f54ed35a6eb44d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733982112193 2024-12-12T05:41:54,999 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cb8df20c2814132a214811dd2bcf920, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733982112193 2024-12-12T05:41:55,004 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:55,006 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7af3549cbdbd66c1f5a0c758d39edf04#B#compaction#492 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:55,006 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212753922e658544c158c4f7ff053933bed_7af3549cbdbd66c1f5a0c758d39edf04 store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:55,006 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/d51a5669685a4f81a591da7387309aaf is 50, key is test_row_0/B:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:55,008 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212753922e658544c158c4f7ff053933bed_7af3549cbdbd66c1f5a0c758d39edf04, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:55,008 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212753922e658544c158c4f7ff053933bed_7af3549cbdbd66c1f5a0c758d39edf04 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:55,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742414_1590 (size=12983) 2024-12-12T05:41:55,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742415_1591 (size=4469) 2024-12-12T05:41:55,417 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#A#compaction#493 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:55,418 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/fbf96d879d364085955d33c250f4301a is 175, key is test_row_0/A:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:55,420 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/d51a5669685a4f81a591da7387309aaf as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51a5669685a4f81a591da7387309aaf 2024-12-12T05:41:55,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742416_1592 (size=31937) 2024-12-12T05:41:55,425 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/B of 7af3549cbdbd66c1f5a0c758d39edf04 into d51a5669685a4f81a591da7387309aaf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:55,425 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:55,425 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/B, priority=12, startTime=1733982114997; duration=0sec 2024-12-12T05:41:55,425 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T05:41:55,425 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:B 2024-12-12T05:41:55,425 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T05:41:55,426 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T05:41:55,426 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1540): 7af3549cbdbd66c1f5a0c758d39edf04/C is initiating minor compaction (all files) 2024-12-12T05:41:55,426 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7af3549cbdbd66c1f5a0c758d39edf04/C in TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:55,426 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a3d7d2e3137c439783f21f0fc583bf58, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1fda93068fa44add87f4f29c735028be, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/5c8c0a1f7e5e420eb1836de2c30814d2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1d203fa673c0427a956e870408eb7b02] into tmpdir=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp, totalSize=48.2 K 2024-12-12T05:41:55,427 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3d7d2e3137c439783f21f0fc583bf58, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733982107481 2024-12-12T05:41:55,427 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fda93068fa44add87f4f29c735028be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733982108615 2024-12-12T05:41:55,427 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c8c0a1f7e5e420eb1836de2c30814d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733982110051 2024-12-12T05:41:55,428 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d203fa673c0427a956e870408eb7b02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733982112193 2024-12-12T05:41:55,435 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7af3549cbdbd66c1f5a0c758d39edf04#C#compaction#494 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T05:41:55,435 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8d55383523954951986619efb1d7b66f is 50, key is test_row_0/C:col10/1733982113317/Put/seqid=0 2024-12-12T05:41:55,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742417_1593 (size=12983) 2024-12-12T05:41:55,831 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/fbf96d879d364085955d33c250f4301a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/fbf96d879d364085955d33c250f4301a 2024-12-12T05:41:55,836 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/A of 7af3549cbdbd66c1f5a0c758d39edf04 into fbf96d879d364085955d33c250f4301a(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T05:41:55,836 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:55,836 INFO [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/A, priority=12, startTime=1733982114997; duration=0sec 2024-12-12T05:41:55,836 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:55,836 DEBUG [RS:0;83e80bf221ca:46457-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:A 2024-12-12T05:41:55,843 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/8d55383523954951986619efb1d7b66f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8d55383523954951986619efb1d7b66f 2024-12-12T05:41:55,849 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7af3549cbdbd66c1f5a0c758d39edf04/C of 7af3549cbdbd66c1f5a0c758d39edf04 into 8d55383523954951986619efb1d7b66f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T05:41:55,849 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:55,849 INFO [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04., storeName=7af3549cbdbd66c1f5a0c758d39edf04/C, priority=12, startTime=1733982114998; duration=0sec 2024-12-12T05:41:55,849 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T05:41:55,849 DEBUG [RS:0;83e80bf221ca:46457-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7af3549cbdbd66c1f5a0c758d39edf04:C 2024-12-12T05:41:56,792 DEBUG [Thread-2274 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x091d72db to 127.0.0.1:60303 2024-12-12T05:41:56,792 DEBUG [Thread-2282 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:60303 2024-12-12T05:41:56,792 DEBUG [Thread-2282 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:56,792 DEBUG [Thread-2274 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:57,514 DEBUG [Thread-2276 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:60303 2024-12-12T05:41:57,515 DEBUG [Thread-2276 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:57,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T05:41:57,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-12T05:41:57,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-12-12T05:41:57,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-12T05:41:57,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8580 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8636 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8506 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8587 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8625 2024-12-12T05:41:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T05:41:57,516 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:41:57,516 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:60303 2024-12-12T05:41:57,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:57,517 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T05:41:57,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T05:41:57,539 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:57,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T05:41:57,542 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982117542"}]},"ts":"1733982117542"} 2024-12-12T05:41:57,544 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T05:41:57,601 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T05:41:57,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T05:41:57,606 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, UNASSIGN}] 2024-12-12T05:41:57,607 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, UNASSIGN 2024-12-12T05:41:57,609 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=167 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=CLOSING, regionLocation=83e80bf221ca,46457,1733981928566 2024-12-12T05:41:57,611 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T05:41:57,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; CloseRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566}] 2024-12-12T05:41:57,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T05:41:57,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:57,765 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] handler.UnassignRegionHandler(124): Close 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:57,765 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T05:41:57,766 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1681): Closing 7af3549cbdbd66c1f5a0c758d39edf04, disabling compactions & flushes 2024-12-12T05:41:57,766 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 
2024-12-12T05:41:57,766 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:57,766 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. after waiting 0 ms 2024-12-12T05:41:57,766 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:57,766 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(2837): Flushing 7af3549cbdbd66c1f5a0c758d39edf04 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-12T05:41:57,766 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=A 2024-12-12T05:41:57,767 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:57,767 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=B 2024-12-12T05:41:57,767 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:57,767 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7af3549cbdbd66c1f5a0c758d39edf04, store=C 2024-12-12T05:41:57,767 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T05:41:57,779 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120a828f1ef69047a18ad3a1e2778aaca3_7af3549cbdbd66c1f5a0c758d39edf04 is 50, key is test_row_0/A:col10/1733982117511/Put/seqid=0 2024-12-12T05:41:57,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742418_1594 (size=12454) 2024-12-12T05:41:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T05:41:58,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T05:41:58,184 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T05:41:58,192 INFO 
[RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120a828f1ef69047a18ad3a1e2778aaca3_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120a828f1ef69047a18ad3a1e2778aaca3_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:58,193 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/63f4b5d27ede4b07970b5ed8a83c93e1, store: [table=TestAcidGuarantees family=A region=7af3549cbdbd66c1f5a0c758d39edf04] 2024-12-12T05:41:58,194 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/63f4b5d27ede4b07970b5ed8a83c93e1 is 175, key is test_row_0/A:col10/1733982117511/Put/seqid=0 2024-12-12T05:41:58,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742419_1595 (size=31255) 2024-12-12T05:41:58,601 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=300, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/63f4b5d27ede4b07970b5ed8a83c93e1 2024-12-12T05:41:58,613 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/5c37669993f7475cb9db461a632433e0 is 50, key is test_row_0/B:col10/1733982117511/Put/seqid=0 2024-12-12T05:41:58,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742420_1596 (size=12301) 2024-12-12T05:41:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T05:41:59,018 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/5c37669993f7475cb9db461a632433e0 2024-12-12T05:41:59,031 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/3912932e05664300bd3f252fba724179 is 50, key is test_row_0/C:col10/1733982117511/Put/seqid=0 2024-12-12T05:41:59,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742421_1597 (size=12301) 2024-12-12T05:41:59,437 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/3912932e05664300bd3f252fba724179 2024-12-12T05:41:59,445 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/A/63f4b5d27ede4b07970b5ed8a83c93e1 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/63f4b5d27ede4b07970b5ed8a83c93e1 2024-12-12T05:41:59,450 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/63f4b5d27ede4b07970b5ed8a83c93e1, entries=150, sequenceid=300, filesize=30.5 K 2024-12-12T05:41:59,451 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/B/5c37669993f7475cb9db461a632433e0 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5c37669993f7475cb9db461a632433e0 2024-12-12T05:41:59,454 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5c37669993f7475cb9db461a632433e0, entries=150, sequenceid=300, filesize=12.0 K 2024-12-12T05:41:59,455 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/.tmp/C/3912932e05664300bd3f252fba724179 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/3912932e05664300bd3f252fba724179 2024-12-12T05:41:59,457 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/3912932e05664300bd3f252fba724179, entries=150, sequenceid=300, filesize=12.0 K 2024-12-12T05:41:59,458 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 7af3549cbdbd66c1f5a0c758d39edf04 in 1692ms, sequenceid=300, compaction requested=false 2024-12-12T05:41:59,459 DEBUG [StoreCloser-TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/febbab45559142c6b3eee959009e0639, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c046502c44f64421a7a0fa98bdf97524, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/345edd1a11b841018869a7a253f4f9cf, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/0f1fc15db72d4e848495a2ab0c22dcac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/71f279d707924e9089eace71bd46e25a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920] to archive 2024-12-12T05:41:59,459 DEBUG [StoreCloser-TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:41:59,463 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/15abe53eddbb43ce8c19cda0eb3cc0cc 2024-12-12T05:41:59,463 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/a78dc5f406904d5a98fe68565e3b7d3d 2024-12-12T05:41:59,463 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/44378ac5375943f68198c207b135ecfd 2024-12-12T05:41:59,464 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/febbab45559142c6b3eee959009e0639 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/febbab45559142c6b3eee959009e0639 2024-12-12T05:41:59,464 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c046502c44f64421a7a0fa98bdf97524 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c046502c44f64421a7a0fa98bdf97524 
2024-12-12T05:41:59,464 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/2dbc79757b434da89bb38280fb24633f 2024-12-12T05:41:59,465 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/35f7ef3f5c5a4e30be05fa98e3d02d77 2024-12-12T05:41:59,465 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4eb1479ecbe64779bf4c0531d28cc724 2024-12-12T05:41:59,465 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/65b430ee4eb345c6a30768f4e0690520 2024-12-12T05:41:59,465 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/345edd1a11b841018869a7a253f4f9cf to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/345edd1a11b841018869a7a253f4f9cf 2024-12-12T05:41:59,466 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/4a3f1079615c477fbe61927640f45bd1 2024-12-12T05:41:59,466 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/0f1fc15db72d4e848495a2ab0c22dcac to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/0f1fc15db72d4e848495a2ab0c22dcac 2024-12-12T05:41:59,466 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/c9300a5295d8438cb27ae59b75f88cf6 2024-12-12T05:41:59,466 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/420c0ef6842b4031910b8e7d6df91666 2024-12-12T05:41:59,467 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/6abce5b28e3a45d396f25fb19dec2738 2024-12-12T05:41:59,467 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9ec4ca5bcba3462f9bc8cad9149d1870 2024-12-12T05:41:59,467 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/71f279d707924e9089eace71bd46e25a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/71f279d707924e9089eace71bd46e25a 2024-12-12T05:41:59,467 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/1ca012331fa340e7b9fa05cc61c957d0 2024-12-12T05:41:59,467 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/9cb8df20c2814132a214811dd2bcf920 2024-12-12T05:41:59,476 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/5d11e9e09dc7411eb7c0793803940ff4 2024-12-12T05:41:59,478 DEBUG [StoreCloser-TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/6861d002ced84ab2b7b4061d0bb0c9d4, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/23ddf3c777204949b7ddd20550e32590, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/f9c9df745e2b4ce895b6d290bf74dc6c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/45027ce135954d03a866f9c57b2f6b16, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/476ac6508ba64836ad1b746e8bae7b9a, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/152412867bc24733ae3e68f8e846c1fa, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/cc7f0eb25ba9480e83249c6ee1d2d85f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/4b412d7303f842abb3eced0ff8e07727, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/dfbb5bbd2f344692850300ff7da3690f, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/a928803aef5542d4bf1d52b231fe694d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/af31bc4aa8474673969b40b77a1eb497, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5365ed7fd8214015a591197d1dd67404, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/0200cf1bf01d423d8f7048db297d3bcc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/37f917d1b1df49f1a2efcacf9af09f92, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/46f8e70fd6164b2cb35dca049723ae8b, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/b4e3c05b2f9a41a283fb5e4a854f30c5, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/21db5cef677b4e1aa5f04231af885ffe, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51cd074b89d4d5e974975a9b45c2989, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/22533c33bb774e3ba8f54ed35a6eb44d] to archive 2024-12-12T05:41:59,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:41:59,480 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/c7c4e71a5f1c4f8a9faa9dd0816c1fb1 2024-12-12T05:41:59,480 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/6861d002ced84ab2b7b4061d0bb0c9d4 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/6861d002ced84ab2b7b4061d0bb0c9d4 2024-12-12T05:41:59,480 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/45027ce135954d03a866f9c57b2f6b16 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/45027ce135954d03a866f9c57b2f6b16 2024-12-12T05:41:59,480 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/476ac6508ba64836ad1b746e8bae7b9a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/476ac6508ba64836ad1b746e8bae7b9a 2024-12-12T05:41:59,480 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/cc7f0eb25ba9480e83249c6ee1d2d85f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/cc7f0eb25ba9480e83249c6ee1d2d85f 2024-12-12T05:41:59,481 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/152412867bc24733ae3e68f8e846c1fa to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/152412867bc24733ae3e68f8e846c1fa 2024-12-12T05:41:59,481 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/f9c9df745e2b4ce895b6d290bf74dc6c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/f9c9df745e2b4ce895b6d290bf74dc6c 2024-12-12T05:41:59,481 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/23ddf3c777204949b7ddd20550e32590 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/23ddf3c777204949b7ddd20550e32590 2024-12-12T05:41:59,481 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/dfbb5bbd2f344692850300ff7da3690f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/dfbb5bbd2f344692850300ff7da3690f 2024-12-12T05:41:59,481 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/4b412d7303f842abb3eced0ff8e07727 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/4b412d7303f842abb3eced0ff8e07727 2024-12-12T05:41:59,482 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/af31bc4aa8474673969b40b77a1eb497 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/af31bc4aa8474673969b40b77a1eb497 2024-12-12T05:41:59,482 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/a928803aef5542d4bf1d52b231fe694d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/a928803aef5542d4bf1d52b231fe694d 2024-12-12T05:41:59,482 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/0200cf1bf01d423d8f7048db297d3bcc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/0200cf1bf01d423d8f7048db297d3bcc 2024-12-12T05:41:59,482 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/37f917d1b1df49f1a2efcacf9af09f92 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/37f917d1b1df49f1a2efcacf9af09f92 2024-12-12T05:41:59,482 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/46f8e70fd6164b2cb35dca049723ae8b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/46f8e70fd6164b2cb35dca049723ae8b 2024-12-12T05:41:59,482 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5365ed7fd8214015a591197d1dd67404 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5365ed7fd8214015a591197d1dd67404 2024-12-12T05:41:59,483 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/b4e3c05b2f9a41a283fb5e4a854f30c5 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/b4e3c05b2f9a41a283fb5e4a854f30c5 2024-12-12T05:41:59,483 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/21db5cef677b4e1aa5f04231af885ffe to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/21db5cef677b4e1aa5f04231af885ffe 2024-12-12T05:41:59,483 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51cd074b89d4d5e974975a9b45c2989 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51cd074b89d4d5e974975a9b45c2989 2024-12-12T05:41:59,483 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/22533c33bb774e3ba8f54ed35a6eb44d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/22533c33bb774e3ba8f54ed35a6eb44d 2024-12-12T05:41:59,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1a6c34fc98c84c729dbaf07cf3cfb4ac, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8b881784030f4c93ae4d1ebf9929c45d, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/652e5b4d235b44a3802b8fc1b94e3107, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d3c5d9c69fc64ed3960b8981557eedb7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8047f779c8734d41bd328835cdaf6b32, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/7407d334f34e4a669772437508578a8b, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/e3331178e478466ab17a38baa58da6c1, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/968db790594048f380c5475e5fae77e9, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a96cf69a1edf42729e5564a15540d194, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/64187b422f7a492ca1208a464babff9c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a930753693e140d18a7eea36c2b16b2c, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/fdd51d443af94dc898a540de63fb9aa7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/39fdbf943e8740a0a958645b322ac2dc, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d6867165bd364852a42779da1e75d7ad, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1b59c33262d44818b90a49fa736bb3b7, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a3d7d2e3137c439783f21f0fc583bf58, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/f564de5394e140b4848d0efac71774b3, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1fda93068fa44add87f4f29c735028be, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/5c8c0a1f7e5e420eb1836de2c30814d2, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1d203fa673c0427a956e870408eb7b02] to archive 2024-12-12T05:41:59,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d3c5d9c69fc64ed3960b8981557eedb7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d3c5d9c69fc64ed3960b8981557eedb7 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/7407d334f34e4a669772437508578a8b to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/7407d334f34e4a669772437508578a8b 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8047f779c8734d41bd328835cdaf6b32 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8047f779c8734d41bd328835cdaf6b32 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/e3331178e478466ab17a38baa58da6c1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/e3331178e478466ab17a38baa58da6c1 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8b881784030f4c93ae4d1ebf9929c45d to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8b881784030f4c93ae4d1ebf9929c45d 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1a6c34fc98c84c729dbaf07cf3cfb4ac to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1a6c34fc98c84c729dbaf07cf3cfb4ac 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/652e5b4d235b44a3802b8fc1b94e3107 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/652e5b4d235b44a3802b8fc1b94e3107 2024-12-12T05:41:59,490 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/968db790594048f380c5475e5fae77e9 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/968db790594048f380c5475e5fae77e9 2024-12-12T05:41:59,491 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a96cf69a1edf42729e5564a15540d194 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a96cf69a1edf42729e5564a15540d194 2024-12-12T05:41:59,491 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/fdd51d443af94dc898a540de63fb9aa7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/fdd51d443af94dc898a540de63fb9aa7 2024-12-12T05:41:59,491 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/39fdbf943e8740a0a958645b322ac2dc to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/39fdbf943e8740a0a958645b322ac2dc 2024-12-12T05:41:59,491 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a930753693e140d18a7eea36c2b16b2c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a930753693e140d18a7eea36c2b16b2c 2024-12-12T05:41:59,491 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/64187b422f7a492ca1208a464babff9c to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/64187b422f7a492ca1208a464babff9c 2024-12-12T05:41:59,492 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d6867165bd364852a42779da1e75d7ad to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/d6867165bd364852a42779da1e75d7ad 2024-12-12T05:41:59,493 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1fda93068fa44add87f4f29c735028be to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1fda93068fa44add87f4f29c735028be 2024-12-12T05:41:59,493 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/f564de5394e140b4848d0efac71774b3 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/f564de5394e140b4848d0efac71774b3 2024-12-12T05:41:59,493 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/5c8c0a1f7e5e420eb1836de2c30814d2 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/5c8c0a1f7e5e420eb1836de2c30814d2 2024-12-12T05:41:59,493 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1d203fa673c0427a956e870408eb7b02 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1d203fa673c0427a956e870408eb7b02 2024-12-12T05:41:59,493 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a3d7d2e3137c439783f21f0fc583bf58 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/a3d7d2e3137c439783f21f0fc583bf58 2024-12-12T05:41:59,493 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1b59c33262d44818b90a49fa736bb3b7 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/1b59c33262d44818b90a49fa736bb3b7 2024-12-12T05:41:59,498 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/recovered.edits/303.seqid, newMaxSeqId=303, maxSeqId=4 2024-12-12T05:41:59,498 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04. 2024-12-12T05:41:59,498 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1635): Region close journal for 7af3549cbdbd66c1f5a0c758d39edf04: 2024-12-12T05:41:59,499 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] handler.UnassignRegionHandler(170): Closed 7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,500 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=167 updating hbase:meta row=7af3549cbdbd66c1f5a0c758d39edf04, regionState=CLOSED 2024-12-12T05:41:59,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-12T05:41:59,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseRegionProcedure 7af3549cbdbd66c1f5a0c758d39edf04, server=83e80bf221ca,46457,1733981928566 in 1.8890 sec 2024-12-12T05:41:59,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-12-12T05:41:59,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7af3549cbdbd66c1f5a0c758d39edf04, UNASSIGN in 1.8950 sec 2024-12-12T05:41:59,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-12T05:41:59,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8990 sec 2024-12-12T05:41:59,503 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733982119503"}]},"ts":"1733982119503"} 2024-12-12T05:41:59,504 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T05:41:59,548 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T05:41:59,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0310 sec 2024-12-12T05:41:59,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T05:41:59,651 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-12T05:41:59,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T05:41:59,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:59,655 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=169, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:59,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T05:41:59,657 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=169, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:59,661 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,665 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C, FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/recovered.edits] 2024-12-12T05:41:59,668 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/fbf96d879d364085955d33c250f4301a to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/fbf96d879d364085955d33c250f4301a 2024-12-12T05:41:59,669 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/63f4b5d27ede4b07970b5ed8a83c93e1 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/A/63f4b5d27ede4b07970b5ed8a83c93e1 2024-12-12T05:41:59,671 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5c37669993f7475cb9db461a632433e0 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/5c37669993f7475cb9db461a632433e0 
2024-12-12T05:41:59,671 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51a5669685a4f81a591da7387309aaf to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/B/d51a5669685a4f81a591da7387309aaf 2024-12-12T05:41:59,674 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8d55383523954951986619efb1d7b66f to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/8d55383523954951986619efb1d7b66f 2024-12-12T05:41:59,674 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/3912932e05664300bd3f252fba724179 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/C/3912932e05664300bd3f252fba724179 2024-12-12T05:41:59,676 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/recovered.edits/303.seqid to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04/recovered.edits/303.seqid 2024-12-12T05:41:59,677 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/default/TestAcidGuarantees/7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,677 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T05:41:59,677 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T05:41:59,678 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T05:41:59,683 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120a828f1ef69047a18ad3a1e2778aaca3_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120a828f1ef69047a18ad3a1e2778aaca3_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,683 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121204ceb30334cd4a70be531585348870f0_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121204ceb30334cd4a70be531585348870f0_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,683 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212129b817b1490423dbba46584ce223a83_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212129b817b1490423dbba46584ce223a83_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,684 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121248f8821b42af42df89ee21a562a967b1_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121248f8821b42af42df89ee21a562a967b1_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,684 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212415a26455ee649fca220bfe2120a2b6f_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212415a26455ee649fca220bfe2120a2b6f_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,684 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212161108dd1aaf4a92bbd14e9761eca64c_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212161108dd1aaf4a92bbd14e9761eca64c_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,684 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121243fdd95e5acc4521883481810741b802_7af3549cbdbd66c1f5a0c758d39edf04 to 
hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121243fdd95e5acc4521883481810741b802_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,684 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120aef8e239ef74ff68f63ddd9c6c9a3b1_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120aef8e239ef74ff68f63ddd9c6c9a3b1_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212565837bf89db40679798dd50fe8c1ba8_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212565837bf89db40679798dd50fe8c1ba8_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128548f7c2e9d24494a9474cf63c9a7a22_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128548f7c2e9d24494a9474cf63c9a7a22_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128502c7a939154c4682f99d02a5257c24_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128502c7a939154c4682f99d02a5257c24_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121293d6940bc5bc43df8d80ddc0d508d666_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121293d6940bc5bc43df8d80ddc0d508d666_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c6c4e6dadcd44c4c9b8b5e6cb11df539_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c6c4e6dadcd44c4c9b8b5e6cb11df539_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ba4ebc4009864b6baae26b919236bb68_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ba4ebc4009864b6baae26b919236bb68_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,685 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d6f0caf5d263438082f5ec320816c6e5_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d6f0caf5d263438082f5ec320816c6e5_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,686 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ee990e0e4e9c4974ae9c8102fc405d48_7af3549cbdbd66c1f5a0c758d39edf04 to hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ee990e0e4e9c4974ae9c8102fc405d48_7af3549cbdbd66c1f5a0c758d39edf04 2024-12-12T05:41:59,686 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T05:41:59,688 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=169, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:59,690 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T05:41:59,692 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T05:41:59,693 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=169, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:59,693 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-12T05:41:59,693 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733982119693"}]},"ts":"9223372036854775807"} 2024-12-12T05:41:59,694 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T05:41:59,694 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7af3549cbdbd66c1f5a0c758d39edf04, NAME => 'TestAcidGuarantees,,1733982089507.7af3549cbdbd66c1f5a0c758d39edf04.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T05:41:59,695 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T05:41:59,695 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733982119695"}]},"ts":"9223372036854775807"} 2024-12-12T05:41:59,696 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T05:41:59,707 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=169, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T05:41:59,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-12-12T05:41:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34751 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T05:41:59,758 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-12T05:41:59,765 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=246 (was 245) - Thread LEAK? -, OpenFileDescriptor=458 (was 448) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=265 (was 287), ProcessCount=11 (was 11), AvailableMemoryMB=13214 (was 13234) 2024-12-12T05:41:59,765 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-12T05:41:59,765 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T05:41:59,765 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:60303 2024-12-12T05:41:59,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:59,765 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T05:41:59,766 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=669332021, stopped=false 2024-12-12T05:41:59,766 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=83e80bf221ca,34751,1733981927819 2024-12-12T05:41:59,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:41:59,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T05:41:59,776 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-12T05:41:59,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:41:59,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T05:41:59,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:59,776 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:41:59,776 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T05:41:59,776 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '83e80bf221ca,46457,1733981928566' ***** 2024-12-12T05:41:59,777 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T05:41:59,777 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
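The DISABLE (procId 165) and DELETE (procId 169) operations recorded above are issued through the standard HBase client Admin API. A minimal sketch of the equivalent client-side calls follows, assuming a reachable cluster and default client configuration; the class name and setup are illustrative and not taken from the test code itself.

```java
// Minimal sketch: disabling and deleting the table seen in the log above
// via the HBase client Admin API. Assumes default configuration resolves
// to a running cluster; all names here are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          // Server side this runs a DisableTableProcedure (cf. pid=165 above).
          admin.disableTable(table);
        }
        // Server side this runs a DeleteTableProcedure, which archives the
        // region and MOB files before removing the table from hbase:meta
        // (cf. pid=169 and the HFileArchiver entries above).
        admin.deleteTable(table);
      }
    }
  }
}
```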
2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(3579): Received CLOSE for b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1224): stopping server 83e80bf221ca,46457,1733981928566 2024-12-12T05:41:59,777 DEBUG [RS:0;83e80bf221ca:46457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T05:41:59,777 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing b675848e5b5abf83ab0aa0c34e08f9b3, disabling compactions & flushes 2024-12-12T05:41:59,778 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-12T05:41:59,778 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:41:59,778 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1603): Online Regions={b675848e5b5abf83ab0aa0c34e08f9b3=hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3., 1588230740=hbase:meta,,1.1588230740} 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. after waiting 0 ms 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 
2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T05:41:59,778 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing b675848e5b5abf83ab0aa0c34e08f9b3 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-12T05:41:59,778 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T05:41:59,778 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T05:41:59,778 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-12T05:41:59,780 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:41:59,791 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/.tmp/info/cada1bcf80a84ba6896041fe4ab5f193 is 45, key is default/info:d/1733981934214/Put/seqid=0 2024-12-12T05:41:59,793 INFO [regionserver/83e80bf221ca:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T05:41:59,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742422_1598 (size=5037) 2024-12-12T05:41:59,796 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/info/80a65a0a20384f31a6ffdf0e53473c51 is 143, key is hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3./info:regioninfo/1733981934069/Put/seqid=0 2024-12-12T05:41:59,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742423_1599 (size=7725) 2024-12-12T05:41:59,981 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:42:00,181 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, b675848e5b5abf83ab0aa0c34e08f9b3 2024-12-12T05:42:00,196 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/.tmp/info/cada1bcf80a84ba6896041fe4ab5f193 
2024-12-12T05:42:00,200 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/info/80a65a0a20384f31a6ffdf0e53473c51 2024-12-12T05:42:00,205 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/.tmp/info/cada1bcf80a84ba6896041fe4ab5f193 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/info/cada1bcf80a84ba6896041fe4ab5f193 2024-12-12T05:42:00,210 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/info/cada1bcf80a84ba6896041fe4ab5f193, entries=2, sequenceid=6, filesize=4.9 K 2024-12-12T05:42:00,211 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for b675848e5b5abf83ab0aa0c34e08f9b3 in 433ms, sequenceid=6, compaction requested=false 2024-12-12T05:42:00,215 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/namespace/b675848e5b5abf83ab0aa0c34e08f9b3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T05:42:00,215 INFO [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 2024-12-12T05:42:00,215 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for b675848e5b5abf83ab0aa0c34e08f9b3: 2024-12-12T05:42:00,215 DEBUG [RS_CLOSE_REGION-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733981932754.b675848e5b5abf83ab0aa0c34e08f9b3. 
2024-12-12T05:42:00,223 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/rep_barrier/61adf92680bf4f8992f277ce46fd8969 is 102, key is TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7./rep_barrier:/1733981962853/DeleteFamily/seqid=0
2024-12-12T05:42:00,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742424_1600 (size=6025)
2024-12-12T05:42:00,277 INFO [regionserver/83e80bf221ca:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-12T05:42:00,277 INFO [regionserver/83e80bf221ca:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-12T05:42:00,381 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-12-12T05:42:00,582 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-12-12T05:42:00,628 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/rep_barrier/61adf92680bf4f8992f277ce46fd8969
2024-12-12T05:42:00,652 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/table/0550d0071b0b45d492d835787d7a246f is 96, key is TestAcidGuarantees,,1733981934512.61279763b720b7a9988338e6150d61c7./table:/1733981962853/DeleteFamily/seqid=0
2024-12-12T05:42:00,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742425_1601 (size=5942)
2024-12-12T05:42:00,782 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close
2024-12-12T05:42:00,782 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-12T05:42:00,782 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-12-12T05:42:00,983 DEBUG [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-12-12T05:42:01,057 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/table/0550d0071b0b45d492d835787d7a246f
2024-12-12T05:42:01,067 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/info/80a65a0a20384f31a6ffdf0e53473c51 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/info/80a65a0a20384f31a6ffdf0e53473c51
2024-12-12T05:42:01,071 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/info/80a65a0a20384f31a6ffdf0e53473c51, entries=22, sequenceid=93, filesize=7.5 K
2024-12-12T05:42:01,073 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/rep_barrier/61adf92680bf4f8992f277ce46fd8969 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/rep_barrier/61adf92680bf4f8992f277ce46fd8969
2024-12-12T05:42:01,077 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/rep_barrier/61adf92680bf4f8992f277ce46fd8969, entries=6, sequenceid=93, filesize=5.9 K
2024-12-12T05:42:01,077 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/.tmp/table/0550d0071b0b45d492d835787d7a246f as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/table/0550d0071b0b45d492d835787d7a246f
2024-12-12T05:42:01,081 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/table/0550d0071b0b45d492d835787d7a246f, entries=9, sequenceid=93, filesize=5.8 K
2024-12-12T05:42:01,082 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1304ms, sequenceid=93, compaction requested=false
2024-12-12T05:42:01,086 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1
2024-12-12T05:42:01,086 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-12T05:42:01,086 INFO [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-12T05:42:01,086 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-12T05:42:01,087 DEBUG [RS_CLOSE_META-regionserver/83e80bf221ca:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-12T05:42:01,183 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1250): stopping server 83e80bf221ca,46457,1733981928566; all regions closed.
2024-12-12T05:42:01,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741834_1010 (size=26050)
2024-12-12T05:42:01,196 DEBUG [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/oldWALs
2024-12-12T05:42:01,196 INFO [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 83e80bf221ca%2C46457%2C1733981928566.meta:.meta(num 1733981932490)
2024-12-12T05:42:01,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741832_1008 (size=13425950)
2024-12-12T05:42:01,202 DEBUG [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/oldWALs
2024-12-12T05:42:01,202 INFO [RS:0;83e80bf221ca:46457 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 83e80bf221ca%2C46457%2C1733981928566:(num 1733981931430)
2024-12-12T05:42:01,202 DEBUG [RS:0;83e80bf221ca:46457 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T05:42:01,202 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.LeaseManager(133): Closed leases
2024-12-12T05:42:01,203 INFO [RS:0;83e80bf221ca:46457 {}] hbase.ChoreService(370): Chore service for: regionserver/83e80bf221ca:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-12T05:42:01,203 INFO [regionserver/83e80bf221ca:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-12T05:42:01,204 INFO [RS:0;83e80bf221ca:46457 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46457
2024-12-12T05:42:01,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-12T05:42:01,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83e80bf221ca,46457,1733981928566
2024-12-12T05:42:01,226 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83e80bf221ca,46457,1733981928566]
2024-12-12T05:42:01,226 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 83e80bf221ca,46457,1733981928566; numProcessing=1
2024-12-12T05:42:01,234 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/83e80bf221ca,46457,1733981928566 already deleted, retry=false
2024-12-12T05:42:01,234 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 83e80bf221ca,46457,1733981928566 expired; onlineServers=0
2024-12-12T05:42:01,235 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '83e80bf221ca,34751,1733981927819' *****
2024-12-12T05:42:01,235 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-12T05:42:01,235 DEBUG [M:0;83e80bf221ca:34751 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40ecf8f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83e80bf221ca/172.17.0.2:0
2024-12-12T05:42:01,235 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegionServer(1224): stopping server 83e80bf221ca,34751,1733981927819
2024-12-12T05:42:01,235 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegionServer(1250): stopping server 83e80bf221ca,34751,1733981927819; all regions closed.
2024-12-12T05:42:01,235 DEBUG [M:0;83e80bf221ca:34751 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T05:42:01,235 DEBUG [M:0;83e80bf221ca:34751 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-12T05:42:01,236 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-12T05:42:01,236 DEBUG [M:0;83e80bf221ca:34751 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-12T05:42:01,236 DEBUG [master/83e80bf221ca:0:becomeActiveMaster-HFileCleaner.small.0-1733981931190 {}] cleaner.HFileCleaner(306): Exit Thread[master/83e80bf221ca:0:becomeActiveMaster-HFileCleaner.small.0-1733981931190,5,FailOnTimeoutGroup]
2024-12-12T05:42:01,236 DEBUG [master/83e80bf221ca:0:becomeActiveMaster-HFileCleaner.large.0-1733981931189 {}] cleaner.HFileCleaner(306): Exit Thread[master/83e80bf221ca:0:becomeActiveMaster-HFileCleaner.large.0-1733981931189,5,FailOnTimeoutGroup]
2024-12-12T05:42:01,236 INFO [M:0;83e80bf221ca:34751 {}] hbase.ChoreService(370): Chore service for: master/83e80bf221ca:0 had [] on shutdown
2024-12-12T05:42:01,236 DEBUG [M:0;83e80bf221ca:34751 {}] master.HMaster(1733): Stopping service threads
2024-12-12T05:42:01,237 INFO [M:0;83e80bf221ca:34751 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-12T05:42:01,237 ERROR [M:0;83e80bf221ca:34751 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:45813 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:45813,5,PEWorkerGroup]
2024-12-12T05:42:01,238 INFO [M:0;83e80bf221ca:34751 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-12T05:42:01,238 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-12T05:42:01,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-12T05:42:01,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T05:42:01,243 DEBUG [M:0;83e80bf221ca:34751 {}] zookeeper.ZKUtil(347): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-12T05:42:01,243 WARN [M:0;83e80bf221ca:34751 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-12T05:42:01,243 INFO [M:0;83e80bf221ca:34751 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-12T05:42:01,243 INFO [M:0;83e80bf221ca:34751 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-12T05:42:01,243 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-12T05:42:01,243 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T05:42:01,243 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T05:42:01,243 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-12T05:42:01,243 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T05:42:01,243 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T05:42:01,243 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=737.90 KB heapSize=905.37 KB
2024-12-12T05:42:01,258 DEBUG [M:0;83e80bf221ca:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c5b6300b1a27425295d7629b6b88939a is 82, key is hbase:meta,,1/info:regioninfo/1733981932608/Put/seqid=0
2024-12-12T05:42:01,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742426_1602 (size=5672)
2024-12-12T05:42:01,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T05:42:01,326 INFO [RS:0;83e80bf221ca:46457 {}] regionserver.HRegionServer(1307): Exiting; stopping=83e80bf221ca,46457,1733981928566; zookeeper connection closed.
2024-12-12T05:42:01,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46457-0x10018bf93040001, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T05:42:01,327 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c190a29 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c190a29
2024-12-12T05:42:01,327 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-12T05:42:01,663 INFO [M:0;83e80bf221ca:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2060 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c5b6300b1a27425295d7629b6b88939a
2024-12-12T05:42:01,691 DEBUG [M:0;83e80bf221ca:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d777bfd5b444d2b90d51e9fc38e8f97 is 2283, key is \x00\x00\x00\x00\x00\x00\x00,/proc:d/1733981966291/Put/seqid=0
2024-12-12T05:42:01,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742427_1603 (size=43315)
2024-12-12T05:42:02,095 INFO [M:0;83e80bf221ca:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=737.35 KB at sequenceid=2060 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d777bfd5b444d2b90d51e9fc38e8f97
2024-12-12T05:42:02,102 INFO [M:0;83e80bf221ca:34751 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d777bfd5b444d2b90d51e9fc38e8f97
2024-12-12T05:42:02,122 DEBUG [M:0;83e80bf221ca:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1cc63605d46541768a6f4617e61c4746 is 69, key is 83e80bf221ca,46457,1733981928566/rs:state/1733981931209/Put/seqid=0
2024-12-12T05:42:02,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073742428_1604 (size=5156)
2024-12-12T05:42:02,527 INFO [M:0;83e80bf221ca:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2060 (bloomFilter=true), to=hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1cc63605d46541768a6f4617e61c4746
2024-12-12T05:42:02,549 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c5b6300b1a27425295d7629b6b88939a as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c5b6300b1a27425295d7629b6b88939a
2024-12-12T05:42:02,551 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c5b6300b1a27425295d7629b6b88939a, entries=8, sequenceid=2060, filesize=5.5 K
2024-12-12T05:42:02,552 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4d777bfd5b444d2b90d51e9fc38e8f97 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4d777bfd5b444d2b90d51e9fc38e8f97
2024-12-12T05:42:02,554 INFO [M:0;83e80bf221ca:34751 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d777bfd5b444d2b90d51e9fc38e8f97
2024-12-12T05:42:02,555 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4d777bfd5b444d2b90d51e9fc38e8f97, entries=169, sequenceid=2060, filesize=42.3 K
2024-12-12T05:42:02,555 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1cc63605d46541768a6f4617e61c4746 as hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1cc63605d46541768a6f4617e61c4746
2024-12-12T05:42:02,558 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45813/user/jenkins/test-data/0e1346bf-1d11-bc69-33a0-35988bd10b5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1cc63605d46541768a6f4617e61c4746, entries=1, sequenceid=2060, filesize=5.0 K
2024-12-12T05:42:02,559 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(3040): Finished flush of dataSize ~737.90 KB/755611, heapSize ~905.07 KB/926792, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1315ms, sequenceid=2060, compaction requested=false
2024-12-12T05:42:02,560 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T05:42:02,560 DEBUG [M:0;83e80bf221ca:34751 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-12T05:42:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43689 is added to blk_1073741830_1006 (size=890710)
2024-12-12T05:42:02,563 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-12T05:42:02,563 INFO [M:0;83e80bf221ca:34751 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-12T05:42:02,563 INFO [M:0;83e80bf221ca:34751 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34751
2024-12-12T05:42:02,581 DEBUG [M:0;83e80bf221ca:34751 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/83e80bf221ca,34751,1733981927819 already deleted, retry=false
2024-12-12T05:42:02,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T05:42:02,693 INFO [M:0;83e80bf221ca:34751 {}] regionserver.HRegionServer(1307): Exiting; stopping=83e80bf221ca,34751,1733981927819; zookeeper connection closed.
2024-12-12T05:42:02,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10018bf93040000, quorum=127.0.0.1:60303, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T05:42:02,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T05:42:02,708 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T05:42:02,708 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T05:42:02,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T05:42:02,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/hadoop.log.dir/,STOPPED}
2024-12-12T05:42:02,710 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-12T05:42:02,710 WARN [BP-341374527-172.17.0.2-1733981924895 heartbeating to localhost/127.0.0.1:45813 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-12T05:42:02,710 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-12T05:42:02,710 WARN [BP-341374527-172.17.0.2-1733981924895 heartbeating to localhost/127.0.0.1:45813 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341374527-172.17.0.2-1733981924895 (Datanode Uuid 0dcc0832-ff2b-4f87-9ae9-86a479069cdd) service to localhost/127.0.0.1:45813
2024-12-12T05:42:02,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/dfs/data/data1/current/BP-341374527-172.17.0.2-1733981924895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:42:02,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/cluster_6310ccda-7b0a-c91e-05fe-5f1f04ecede1/dfs/data/data2/current/BP-341374527-172.17.0.2-1733981924895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T05:42:02,712 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-12T05:42:02,718 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T05:42:02,718 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T05:42:02,718 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T05:42:02,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T05:42:02,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f17902f0-9982-2d08-5527-80d62330e2a7/hadoop.log.dir/,STOPPED}
2024-12-12T05:42:02,734 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-12T05:42:02,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down